From 34e6f452ff6088073670ccd21220ffa4b05a135a Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 14 Jun 2023 12:27:51 +0200 Subject: [PATCH 001/160] add testclient --- tests/testclient/client.go | 303 +++++++++++++++++++++++++++++++++++++ tests/testclient/models.go | 56 +++++++ 2 files changed, 359 insertions(+) create mode 100644 tests/testclient/client.go create mode 100644 tests/testclient/models.go diff --git a/tests/testclient/client.go b/tests/testclient/client.go new file mode 100644 index 0000000..8e9b123 --- /dev/null +++ b/tests/testclient/client.go @@ -0,0 +1,303 @@ +// Package testclient provides a simple integresql client implementation for test purposes only. +// Please refer to https://github.com/allaboutapps/integresql-client-go +// for a full client implementation to be used in your application. +package testclient + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + + "github.com/allaboutapps/integresql/pkg/manager" + "github.com/allaboutapps/integresql/pkg/util" + _ "github.com/lib/pq" +) + +type ClientConfig struct { + BaseURL string + APIVersion string +} + +func DefaultClientConfigFromEnv() ClientConfig { + return ClientConfig{ + BaseURL: util.GetEnv("INTEGRESQL_CLIENT_BASE_URL", "http://integresql:5000/api"), + APIVersion: util.GetEnv("INTEGRESQL_CLIENT_API_VERSION", "v1"), + } +} + +type Client struct { + baseURL *url.URL + client *http.Client + config ClientConfig +} + +func NewClient(config ClientConfig) (*Client, error) { + c := &Client{ + baseURL: nil, + client: nil, + config: config, + } + + defaultConfig := DefaultClientConfigFromEnv() + + if len(c.config.BaseURL) == 0 { + c.config.BaseURL = defaultConfig.BaseURL + } + + if len(c.config.APIVersion) == 0 { + c.config.APIVersion = defaultConfig.APIVersion + } + + u, err := url.Parse(c.config.BaseURL) + if err != nil { + return nil, err + } + + c.baseURL = u.ResolveReference(&url.URL{Path: 
path.Join(u.Path, c.config.APIVersion)}) + + c.client = &http.Client{} + + return c, nil +} + +func DefaultClientFromEnv() (*Client, error) { + return NewClient(DefaultClientConfigFromEnv()) +} + +func (c *Client) ResetAllTracking(ctx context.Context) error { + req, err := c.newRequest(ctx, "DELETE", "/admin/templates", nil) + if err != nil { + return err + } + + var msg string + resp, err := c.do(req, &msg) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("failed to reset all tracking: %v", msg) + } + + return nil +} + +func (c *Client) InitializeTemplate(ctx context.Context, hash string) (TemplateDatabase, error) { + var template TemplateDatabase + + payload := map[string]string{"hash": hash} + + req, err := c.newRequest(ctx, "POST", "/templates", payload) + if err != nil { + return template, err + } + + resp, err := c.do(req, &template) + if err != nil { + return template, err + } + + switch resp.StatusCode { + case http.StatusOK: + return template, nil + case http.StatusLocked: + return template, manager.ErrTemplateAlreadyInitialized + case http.StatusServiceUnavailable: + return template, manager.ErrManagerNotReady + default: + return template, fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status) + } +} + +func (c *Client) SetupTemplate(ctx context.Context, hash string, init func(conn string) error) error { + template, err := c.InitializeTemplate(ctx, hash) + if err == nil { + if err := init(template.Config.ConnectionString()); err != nil { + return err + } + + return c.FinalizeTemplate(ctx, hash) + } else if err == manager.ErrTemplateAlreadyInitialized { + return nil + } else { + return err + } +} + +func (c *Client) SetupTemplateWithDBClient(ctx context.Context, hash string, init func(db *sql.DB) error) error { + template, err := c.InitializeTemplate(ctx, hash) + if err == nil { + db, err := sql.Open("postgres", template.Config.ConnectionString()) + if err != nil { + return 
err + } + defer db.Close() + + if err := db.PingContext(ctx); err != nil { + return err + } + + if err := init(db); err != nil { + return err + } + + return c.FinalizeTemplate(ctx, hash) + } else if err == manager.ErrTemplateAlreadyInitialized { + return nil + } else { + return err + } +} + +func (c *Client) DiscardTemplate(ctx context.Context, hash string) error { + req, err := c.newRequest(ctx, "DELETE", fmt.Sprintf("/templates/%s", hash), nil) + if err != nil { + return err + } + + resp, err := c.do(req, nil) + if err != nil { + return err + } + + switch resp.StatusCode { + case http.StatusNoContent: + return nil + case http.StatusNotFound: + return manager.ErrTemplateNotFound + case http.StatusServiceUnavailable: + return manager.ErrManagerNotReady + default: + return fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status) + } +} + +func (c *Client) FinalizeTemplate(ctx context.Context, hash string) error { + req, err := c.newRequest(ctx, "PUT", fmt.Sprintf("/templates/%s", hash), nil) + if err != nil { + return err + } + + resp, err := c.do(req, nil) + if err != nil { + return err + } + + switch resp.StatusCode { + case http.StatusNoContent: + return nil + case http.StatusNotFound: + return manager.ErrTemplateNotFound + case http.StatusServiceUnavailable: + return manager.ErrManagerNotReady + default: + return fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status) + } +} + +func (c *Client) GetTestDatabase(ctx context.Context, hash string) (TestDatabase, error) { + var test TestDatabase + + req, err := c.newRequest(ctx, "GET", fmt.Sprintf("/templates/%s/tests", hash), nil) + if err != nil { + return test, err + } + + resp, err := c.do(req, &test) + if err != nil { + return test, err + } + + switch resp.StatusCode { + case http.StatusOK: + return test, nil + case http.StatusNotFound: + return test, manager.ErrTemplateNotFound + case http.StatusGone: + return test, manager.ErrDatabaseDiscarded + case 
http.StatusServiceUnavailable: + return test, manager.ErrManagerNotReady + default: + return test, fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status) + } +} + +func (c *Client) ReturnTestDatabase(ctx context.Context, hash string, id int) error { + req, err := c.newRequest(ctx, "DELETE", fmt.Sprintf("/templates/%s/tests/%d", hash, id), nil) + if err != nil { + return err + } + + resp, err := c.do(req, nil) + if err != nil { + return err + } + + switch resp.StatusCode { + case http.StatusNoContent: + return nil + case http.StatusNotFound: + return manager.ErrTemplateNotFound + case http.StatusServiceUnavailable: + return manager.ErrManagerNotReady + default: + return fmt.Errorf("received unexpected HTTP status %d (%s)", resp.StatusCode, resp.Status) + } +} + +func (c *Client) newRequest(ctx context.Context, method string, endpoint string, body interface{}) (*http.Request, error) { + u := c.baseURL.ResolveReference(&url.URL{Path: path.Join(c.baseURL.Path, endpoint)}) + + var buf io.ReadWriter + if body != nil { + buf = new(bytes.Buffer) + if err := json.NewEncoder(buf).Encode(body); err != nil { + return nil, err + } + } + + req, err := http.NewRequestWithContext(ctx, method, u.String(), buf) + if err != nil { + return nil, err + } + + if body != nil { + req.Header.Set("Content-Type", "application/json; charset=UTF-8") + } + + req.Header.Set("Accept", "application/json") + + return req, nil +} + +func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) { + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + + // body must always be closed + defer resp.Body.Close() + + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNoContent { + return resp, nil + } + + // if the provided v pointer is nil we cannot unmarschal the body to anything + if v == nil { + return resp, nil + } + + if err := json.NewDecoder(resp.Body).Decode(v); err != nil { + return nil, err + } + + return 
resp, err +} diff --git a/tests/testclient/models.go b/tests/testclient/models.go new file mode 100644 index 0000000..c93933a --- /dev/null +++ b/tests/testclient/models.go @@ -0,0 +1,56 @@ +package testclient + +import ( + "fmt" + "sort" + "strings" +) + +type TestDatabase struct { + Database `json:"database"` + + ID int `json:"id"` +} + +type TemplateDatabase struct { + Database `json:"database"` +} + +type Database struct { + TemplateHash string `json:"templateHash"` + Config DatabaseConfig `json:"config"` +} + +type DatabaseConfig struct { + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password"` + Database string `json:"database"` + AdditionalParams map[string]string `json:"additionalParams,omitempty"` // Optional additional connection parameters mapped into the connection string +} + +// Generates a connection string to be passed to sql.Open or equivalents, assuming Postgres syntax +func (c DatabaseConfig) ConnectionString() string { + var b strings.Builder + b.WriteString(fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", c.Host, c.Port, c.Username, c.Password, c.Database)) + + if _, ok := c.AdditionalParams["sslmode"]; !ok { + b.WriteString(" sslmode=disable") + } + + if len(c.AdditionalParams) > 0 { + params := make([]string, 0, len(c.AdditionalParams)) + for param := range c.AdditionalParams { + params = append(params, param) + } + + sort.Strings(params) + + for _, param := range params { + fmt.Fprintf(&b, " %s=%s", param, c.AdditionalParams[param]) + } + } + + return b.String() +} From 20f9e7c61f1b7fc0188907046bf57b01d4a58300 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 14 Jun 2023 12:03:44 +0000 Subject: [PATCH 002/160] update go version to 1.17.13 --- Dockerfile | 4 ++-- go.mod | 10 ++++++++-- go.sum | 38 +++++++++++++++++++++++++------------- 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0b96f57..3684240 100644 --- 
a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14.2 AS development +FROM golang:1.17.13 AS development # https://github.com/go-modules-by-example/index/blob/master/010_tools/README.md#walk-through ENV GOBIN /app/bin @@ -49,7 +49,7 @@ RUN wget https://github.com/kyoh86/richgo/releases/download/v0.3.3/richgo_0.3.3_ RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \ | sh -s -- -b $(go env GOPATH)/bin v1.24.0 -# go swagger: (this package should NOT be installed via go get) +# go swagger: (this package should NOT be installed via go get) # https://github.com/go-swagger/go-swagger/releases RUN curl -o /usr/local/bin/swagger -L'#' \ "https://github.com/go-swagger/go-swagger/releases/download/v0.23.0/swagger_linux_amd64" \ diff --git a/go.mod b/go.mod index 6d76eaa..1230b4d 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,15 @@ module github.com/allaboutapps/integresql go 1.14 require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/google/uuid v1.3.0 + github.com/kr/pretty v0.2.1 // indirect github.com/labstack/echo/v4 v4.1.16 github.com/lib/pq v1.3.0 + github.com/stretchr/testify v1.7.0 golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 // indirect - golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect - golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect + golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect + golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect + golang.org/x/text v0.3.7 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect ) diff --git a/go.sum b/go.sum index 51a1bdc..3830e28 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,15 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o= github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI= github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= @@ -18,38 +26,42 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4= github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 h1:IhZPbxNd1UjBCaD5AfpSSbJTRlp+ZSuyuH5uoksNS04= golang.org/x/crypto v0.0.0-20200420104511-884d27f42877/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 
h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From d0ee9e7756cb23cee6d49f2d1da0db234d7c489a Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 14 Jun 2023 13:27:31 +0000 Subject: [PATCH 003/160] add BenchmarkGetDatabaseFromNewTemplate --- tests/integresql_test.go | 70 ++++++++++++++++++++++++++++++++++++++ tests/testclient/client.go | 30 ++++++++-------- 2 files changed, 84 insertions(+), 16 deletions(-) create mode 100644 tests/integresql_test.go diff --git a/tests/integresql_test.go b/tests/integresql_test.go new file mode 100644 index 0000000..9d2b8f2 --- /dev/null +++ b/tests/integresql_test.go @@ -0,0 +1,70 @@ +// Package integresql_test provides benchmarks to test integresql performance. +// Before running any of the tests, make sure that integresql is running. 
+package integresql_test + +import ( + "context" + "database/sql" + "errors" + "testing" + "time" + + "github.com/allaboutapps/integresql/tests/testclient" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func BenchmarkGetDatabaseFromNewTemplate(b *testing.B) { + client, err := testclient.DefaultClientFromEnv() + require.NoError(b, err) + + for i := 0; i < b.N; i++ { + ctx := context.Background() + + newTemplateHash := uuid.NewString() + + err = client.SetupTemplateWithDBClient(ctx, newTemplateHash, func(db *sql.DB) error { + _, err := db.ExecContext(ctx, `CREATE TABLE users ( + id int NOT NULL, + username varchar(255) NOT NULL, + created_at timestamptz NOT NULL, + CONSTRAINT users_pkey PRIMARY KEY (id));`) + require.NoError(b, err) + res, err := db.ExecContext(ctx, ` + INSERT INTO users (id, username, created_at) + VALUES + (1, 'user1', $1), + (2, 'user2', $1); + `, time.Now()) + require.NoError(b, err) + inserted, err := res.RowsAffected() + require.NoError(b, err) + require.Equal(b, int64(2), inserted) + return nil + }) + require.NoError(b, err) + + dbConfig, err := client.GetTestDatabase(ctx, newTemplateHash) + require.NoError(b, err) + db, err := sql.Open("postgres", dbConfig.Config.ConnectionString()) + require.NoError(b, err) + defer db.Close() + + require.NoError(b, db.PingContext(ctx)) + row := db.QueryRowContext(ctx, "SELECT COUNT(id) FROM users;") + require.NoError(b, row.Err()) + var userCnt int + require.NoError(b, row.Scan(&userCnt)) + assert.Equal(b, 2, userCnt) + } +} + +// nolint: deadcode +func ignoreError(toIgnore error, err error) error { + if errors.Is(err, toIgnore) { + return nil + } + + return err +} diff --git a/tests/testclient/client.go b/tests/testclient/client.go index 8e9b123..0553afd 100644 --- a/tests/testclient/client.go +++ b/tests/testclient/client.go @@ -133,27 +133,25 @@ func (c *Client) SetupTemplate(ctx context.Context, hash string, init 
func(conn func (c *Client) SetupTemplateWithDBClient(ctx context.Context, hash string, init func(db *sql.DB) error) error { template, err := c.InitializeTemplate(ctx, hash) - if err == nil { - db, err := sql.Open("postgres", template.Config.ConnectionString()) - if err != nil { - return err - } - defer db.Close() + if err != nil { + return err + } - if err := db.PingContext(ctx); err != nil { - return err - } + db, err := sql.Open("postgres", template.Config.ConnectionString()) + if err != nil { + return err + } + defer db.Close() - if err := init(db); err != nil { - return err - } + if err := db.PingContext(ctx); err != nil { + return err + } - return c.FinalizeTemplate(ctx, hash) - } else if err == manager.ErrTemplateAlreadyInitialized { - return nil - } else { + if err := init(db); err != nil { return err } + + return c.FinalizeTemplate(ctx, hash) } func (c *Client) DiscardTemplate(ctx context.Context, hash string) error { From 724a832388b2d001bc4764ac0b66490cc6a18247 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 15 Jun 2023 11:04:30 +0000 Subject: [PATCH 004/160] add debug endpoints --- internal/api/server_config.go | 10 ++++++---- internal/router/router.go | 7 +++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/internal/api/server_config.go b/internal/api/server_config.go index 8294238..5856064 100644 --- a/internal/api/server_config.go +++ b/internal/api/server_config.go @@ -3,13 +3,15 @@ package api import "github.com/allaboutapps/integresql/pkg/util" type ServerConfig struct { - Address string - Port int + Address string + Port int + DebugEndpoints bool } func DefaultServerConfigFromEnv() ServerConfig { return ServerConfig{ - Address: util.GetEnv("INTEGRESQL_ADDRESS", ""), - Port: util.GetEnvAsInt("INTEGRESQL_PORT", 5000), + Address: util.GetEnv("INTEGRESQL_ADDRESS", ""), + Port: util.GetEnvAsInt("INTEGRESQL_PORT", 5000), + DebugEndpoints: util.GetEnvAsBool("INTEGRESQL_DEBUG_ENDPOINTS", false), } } diff --git 
a/internal/router/router.go b/internal/router/router.go index e3df24c..35d5505 100644 --- a/internal/router/router.go +++ b/internal/router/router.go @@ -1,6 +1,8 @@ package router import ( + "net/http" + "github.com/allaboutapps/integresql/internal/api" "github.com/allaboutapps/integresql/internal/api/admin" "github.com/allaboutapps/integresql/internal/api/templates" @@ -22,4 +24,9 @@ func Init(s *api.Server) { admin.InitRoutes(s) templates.InitRoutes(s) + + // enable debug endpoints only if requested + if s.Config.DebugEndpoints { + s.Echo.GET("/debug/*", echo.WrapHandler(http.DefaultServeMux)) + } } From d4fc2fb9eec25c4098f2a0ec3d60af689525050d Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 15 Jun 2023 12:04:51 +0000 Subject: [PATCH 005/160] add tracing tasks to manager --- internal/api/server.go | 2 ++ internal/api/server_config.go | 2 +- pkg/manager/manager.go | 31 ++++++++++++++++---- tests/integresql_test.go | 53 ++++++++++++++++++----------------- 4 files changed, 55 insertions(+), 33 deletions(-) diff --git a/internal/api/server.go b/internal/api/server.go index af9b540..23f6450 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -8,6 +8,8 @@ import ( "net" "time" + _ "net/http/pprof" + "github.com/allaboutapps/integresql/pkg/manager" "github.com/allaboutapps/integresql/pkg/util" "github.com/labstack/echo/v4" diff --git a/internal/api/server_config.go b/internal/api/server_config.go index 5856064..41194f9 100644 --- a/internal/api/server_config.go +++ b/internal/api/server_config.go @@ -12,6 +12,6 @@ func DefaultServerConfigFromEnv() ServerConfig { return ServerConfig{ Address: util.GetEnv("INTEGRESQL_ADDRESS", ""), Port: util.GetEnvAsInt("INTEGRESQL_PORT", 5000), - DebugEndpoints: util.GetEnvAsBool("INTEGRESQL_DEBUG_ENDPOINTS", false), + DebugEndpoints: util.GetEnvAsBool("INTEGRESQL_DEBUG_ENDPOINTS", true), } } diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 3b24275..c0837b9 100644 --- 
a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -5,6 +5,7 @@ import ( "database/sql" "errors" "fmt" + "runtime/trace" "sort" "sync" "time" @@ -138,12 +139,17 @@ func (m *Manager) Initialize(ctx context.Context) error { } func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (*TemplateDatabase, error) { + ctx, task := trace.NewTask(ctx, "initialize_template_db") + defer task.End() + if !m.Ready() { return nil, ErrManagerNotReady } + reg := trace.StartRegion(ctx, "get_template_lock") m.templateMutex.Lock() defer m.templateMutex.Unlock() + reg.End() _, ok := m.templates[hash] @@ -174,24 +180,30 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( m.templates[hash] = template + reg = trace.StartRegion(ctx, "drop_and_create_db") if err := m.dropAndCreateDatabase(ctx, dbName, m.config.ManagerDatabaseConfig.Username, m.config.TemplateDatabaseTemplate); err != nil { delete(m.templates, hash) - // m.templates[hash] = nil return nil, err } + reg.End() return template, nil } func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error { + ctx, task := trace.NewTask(ctx, "discard_template_db") + defer task.End() + if !m.Ready() { return ErrManagerNotReady } + reg := trace.StartRegion(ctx, "get_template_lock") m.templateMutex.Lock() defer m.templateMutex.Unlock() + reg.End() template, ok := m.templates[hash] @@ -216,19 +228,23 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro } cancel() - // m.templates[hash] = nil delete(m.templates, hash) return nil } func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (*TemplateDatabase, error) { + ctx, task := trace.NewTask(ctx, "finalize_template_db") + defer task.End() + if !m.Ready() { return nil, ErrManagerNotReady } + reg := trace.StartRegion(ctx, "get_template_lock") m.templateMutex.Lock() defer m.templateMutex.Unlock() + reg.End() template, ok := m.templates[hash] @@ -258,11 +274,16 @@ func 
(m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (*T } func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (*TestDatabase, error) { + ctx, task := trace.NewTask(ctx, "get_test_db") + defer task.End() + if !m.Ready() { return nil, ErrManagerNotReady } + reg := trace.StartRegion(ctx, "get_template_lock") m.templateMutex.RLock() + reg.End() template, ok := m.templates[hash] m.templateMutex.RUnlock() @@ -444,8 +465,7 @@ func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, func (m *Manager) createDatabase(ctx context.Context, dbName string, owner string, template string) error { - // ts := time.Now() - // fmt.Println("createDatabase", dbName, ts) + defer trace.StartRegion(ctx, "create_db").End() if _, err := m.db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template))); err != nil { return err @@ -456,8 +476,7 @@ func (m *Manager) createDatabase(ctx context.Context, dbName string, owner strin func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { - // ts := time.Now() - // fmt.Println("dropDatabase", dbName, ts) + defer trace.StartRegion(ctx, "drop_db").End() if _, err := m.db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil { return err diff --git a/tests/integresql_test.go b/tests/integresql_test.go index 9d2b8f2..5b232a3 100644 --- a/tests/integresql_test.go +++ b/tests/integresql_test.go @@ -16,48 +16,49 @@ import ( ) func BenchmarkGetDatabaseFromNewTemplate(b *testing.B) { + ctx := context.Background() client, err := testclient.DefaultClientFromEnv() require.NoError(b, err) - for i := 0; i < b.N; i++ { - ctx := context.Background() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + newTemplateHash := uuid.NewString() - newTemplateHash := uuid.NewString() - - err = client.SetupTemplateWithDBClient(ctx, newTemplateHash, 
func(db *sql.DB) error { - _, err := db.ExecContext(ctx, `CREATE TABLE users ( + err := client.SetupTemplateWithDBClient(ctx, newTemplateHash, func(db *sql.DB) error { + _, err := db.ExecContext(ctx, `CREATE TABLE users ( id int NOT NULL, username varchar(255) NOT NULL, created_at timestamptz NOT NULL, CONSTRAINT users_pkey PRIMARY KEY (id));`) - require.NoError(b, err) - res, err := db.ExecContext(ctx, ` + require.NoError(b, err) + res, err := db.ExecContext(ctx, ` INSERT INTO users (id, username, created_at) VALUES (1, 'user1', $1), (2, 'user2', $1); `, time.Now()) + require.NoError(b, err) + inserted, err := res.RowsAffected() + require.NoError(b, err) + require.Equal(b, int64(2), inserted) + return nil + }) require.NoError(b, err) - inserted, err := res.RowsAffected() - require.NoError(b, err) - require.Equal(b, int64(2), inserted) - return nil - }) - require.NoError(b, err) - dbConfig, err := client.GetTestDatabase(ctx, newTemplateHash) - require.NoError(b, err) - db, err := sql.Open("postgres", dbConfig.Config.ConnectionString()) - require.NoError(b, err) - defer db.Close() + dbConfig, err := client.GetTestDatabase(ctx, newTemplateHash) + require.NoError(b, err) + db, err := sql.Open("postgres", dbConfig.Config.ConnectionString()) + require.NoError(b, err) + defer db.Close() - require.NoError(b, db.PingContext(ctx)) - row := db.QueryRowContext(ctx, "SELECT COUNT(id) FROM users;") - require.NoError(b, row.Err()) - var userCnt int - require.NoError(b, row.Scan(&userCnt)) - assert.Equal(b, 2, userCnt) - } + require.NoError(b, db.PingContext(ctx)) + row := db.QueryRowContext(ctx, "SELECT COUNT(id) FROM users;") + require.NoError(b, row.Err()) + var userCnt int + require.NoError(b, row.Scan(&userCnt)) + assert.Equal(b, 2, userCnt) + } + }) } // nolint: deadcode From c04b8ff1b98743edad1aff8dc573df7e86828c6a Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 15 Jun 2023 13:45:20 +0000 Subject: [PATCH 006/160] add context to database functions and trace regions --- 
pkg/manager/database.go | 29 +++++++++++++++++++++-------- pkg/manager/manager.go | 14 +++++++------- pkg/manager/test_database.go | 12 +++++++----- pkg/manager/testing.go | 10 +++++----- 4 files changed, 40 insertions(+), 25 deletions(-) diff --git a/pkg/manager/database.go b/pkg/manager/database.go index b83b4ca..33faf6b 100644 --- a/pkg/manager/database.go +++ b/pkg/manager/database.go @@ -3,6 +3,7 @@ package manager import ( "context" "errors" + "runtime/trace" "sync" ) @@ -26,14 +27,20 @@ type Database struct { c chan struct{} } -func (d *Database) State() databaseState { +func (d *Database) State(ctx context.Context) databaseState { + reg := trace.StartRegion(ctx, "db_get_state") + defer reg.End() + d.RLock() defer d.RUnlock() return d.state } -func (d *Database) Ready() bool { +func (d *Database) Ready(ctx context.Context) bool { + reg := trace.StartRegion(ctx, "db_check_ready") + defer reg.End() + d.RLock() defer d.RUnlock() @@ -41,8 +48,10 @@ func (d *Database) Ready() bool { } func (d *Database) WaitUntilReady(ctx context.Context) error { + reg := trace.StartRegion(ctx, "db_wait_ready") + defer reg.End() - state := d.State() + state := d.State(ctx) if state == databaseStateReady { return nil @@ -53,7 +62,7 @@ func (d *Database) WaitUntilReady(ctx context.Context) error { for { select { case <-d.c: - state := d.State() + state := d.State(ctx) if state == databaseStateReady { return nil @@ -67,9 +76,11 @@ func (d *Database) WaitUntilReady(ctx context.Context) error { } } -func (d *Database) FlagAsReady() { +func (d *Database) FlagAsReady(ctx context.Context) { + reg := trace.StartRegion(ctx, "db_flag_ready") + defer reg.End() - state := d.State() + state := d.State(ctx) if state != databaseStateInit { return } @@ -84,9 +95,11 @@ func (d *Database) FlagAsReady() { } } -func (d *Database) FlagAsDiscarded() { +func (d *Database) FlagAsDiscarded(ctx context.Context) { + reg := trace.StartRegion(ctx, "db_flag_discarded") + defer reg.End() - state := d.State() + 
state := d.State(ctx) if state != databaseStateInit { return } diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index c0837b9..38f6d87 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -220,7 +220,7 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro } // discard any still waiting dbs. - template.FlagAsDiscarded() + template.FlagAsDiscarded(ctx) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) if err := template.WaitUntilReady(ctx); err != nil { @@ -253,7 +253,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (*T return nil, ErrTemplateNotFound } - state := template.State() + state := template.State(ctx) // early bailout if we are already ready (multiple calls) if state == databaseStateReady { @@ -265,7 +265,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (*T return nil, ErrDatabaseDiscarded } - template.FlagAsReady() + template.FlagAsReady(ctx) m.wg.Add(1) go m.addTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) @@ -300,7 +300,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (*TestDataba var testDB *TestDatabase for _, db := range template.testDatabases { - if db.ReadyForTest() { + if db.ReadyForTest(ctx) { testDB = db break } @@ -314,7 +314,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (*TestDataba } } - testDB.FlagAsDirty() + testDB.FlagAsDirty(ctx) m.wg.Add(1) go m.addTestDatabasesInBackground(template, 1) @@ -346,7 +346,7 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e for _, db := range template.testDatabases { if db.ID == id { found = true - db.FlagAsClean() + db.FlagAsClean(ctx) break } } @@ -526,7 +526,7 @@ func (m *Manager) createNextTestDatabase(ctx context.Context, template *Template if template.nextTestID > m.config.TestDatabaseMaxPoolSize { i := 0 for idx, db := range template.testDatabases { 
- if db.Dirty() { + if db.Dirty(ctx) { i = idx break } diff --git a/pkg/manager/test_database.go b/pkg/manager/test_database.go index 1c0628b..f378cf0 100644 --- a/pkg/manager/test_database.go +++ b/pkg/manager/test_database.go @@ -1,5 +1,7 @@ package manager +import "context" + type TestDatabase struct { Database `json:"database"` @@ -8,29 +10,29 @@ type TestDatabase struct { dirty bool } -func (t *TestDatabase) Dirty() bool { +func (t *TestDatabase) Dirty(ctx context.Context) bool { t.RLock() defer t.RUnlock() return t.dirty } -func (t *TestDatabase) FlagAsDirty() { +func (t *TestDatabase) FlagAsDirty(ctx context.Context) { t.Lock() defer t.Unlock() t.dirty = true } -func (t *TestDatabase) FlagAsClean() { +func (t *TestDatabase) FlagAsClean(ctx context.Context) { t.Lock() defer t.Unlock() t.dirty = false } -func (t *TestDatabase) ReadyForTest() bool { - return t.Ready() && !t.Dirty() +func (t *TestDatabase) ReadyForTest(ctx context.Context) bool { + return t.Ready(ctx) && !t.Dirty(ctx) } type ByID []*TestDatabase diff --git a/pkg/manager/testing.go b/pkg/manager/testing.go index 92111e3..f79b4f9 100644 --- a/pkg/manager/testing.go +++ b/pkg/manager/testing.go @@ -31,7 +31,7 @@ func disconnectManager(t *testing.T, m *Manager) { } } -func initTemplateDB(wg *sync.WaitGroup, errs chan<- error, m *Manager) { +func initTemplateDB(ctx context.Context, wg *sync.WaitGroup, errs chan<- error, m *Manager) { defer wg.Done() template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash") @@ -40,7 +40,7 @@ func initTemplateDB(wg *sync.WaitGroup, errs chan<- error, m *Manager) { return } - if template.Ready() { + if template.Ready(ctx) { errs <- errors.New("template database is marked as ready") return } @@ -140,7 +140,7 @@ func verifyTestDB(t *testing.T, test *TestDatabase) { } } -func getTestDB(wg *sync.WaitGroup, errs chan<- error, m *Manager) { +func getTestDB(ctx context.Context, wg *sync.WaitGroup, errs chan<- error, m *Manager) { defer wg.Done() db, 
err := m.GetTestDatabase(context.Background(), "hashinghash") @@ -149,11 +149,11 @@ func getTestDB(wg *sync.WaitGroup, errs chan<- error, m *Manager) { return } - if !db.Ready() { + if !db.Ready(ctx) { errs <- errors.New("test database is marked as not ready") return } - if !db.Dirty() { + if !db.Dirty(ctx) { errs <- errors.New("test database is not marked as dirty") } From 373fa744c5b78232e838408e80879f3238032823 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 15 Jun 2023 13:45:34 +0000 Subject: [PATCH 007/160] add benchmark for reusing existing template --- tests/integresql_test.go | 52 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/tests/integresql_test.go b/tests/integresql_test.go index 5b232a3..6cb9c2d 100644 --- a/tests/integresql_test.go +++ b/tests/integresql_test.go @@ -57,10 +57,62 @@ func BenchmarkGetDatabaseFromNewTemplate(b *testing.B) { var userCnt int require.NoError(b, row.Scan(&userCnt)) assert.Equal(b, 2, userCnt) + + require.NoError(b, client.ReturnTestDatabase(ctx, newTemplateHash, dbConfig.ID)) + require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) } }) } +func BenchmarkGetDatabaseFromExistingTemplate(b *testing.B) { + ctx := context.Background() + client, err := testclient.DefaultClientFromEnv() + require.NoError(b, err) + + newTemplateHash := uuid.NewString() + err = client.SetupTemplateWithDBClient(ctx, newTemplateHash, func(db *sql.DB) error { + _, err := db.ExecContext(ctx, `CREATE TABLE users ( + id int NOT NULL, + username varchar(255) NOT NULL, + created_at timestamptz NOT NULL, + CONSTRAINT users_pkey PRIMARY KEY (id));`) + require.NoError(b, err) + res, err := db.ExecContext(ctx, ` + INSERT INTO users (id, username, created_at) + VALUES + (1, 'user1', $1); + `, time.Now()) + require.NoError(b, err) + inserted, err := res.RowsAffected() + require.NoError(b, err) + require.Equal(b, int64(1), inserted) + return nil + }) + require.NoError(b, err) + + b.RunParallel(func(pb 
*testing.PB) { + for pb.Next() { + + dbConfig, err := client.GetTestDatabase(ctx, newTemplateHash) + require.NoError(b, err) + db, err := sql.Open("postgres", dbConfig.Config.ConnectionString()) + require.NoError(b, err) + defer db.Close() + + require.NoError(b, db.PingContext(ctx)) + row := db.QueryRowContext(ctx, "SELECT COUNT(id) FROM users;") + require.NoError(b, row.Err()) + var userCnt int + require.NoError(b, row.Scan(&userCnt)) + assert.Equal(b, 1, userCnt) + + require.NoError(b, client.ReturnTestDatabase(ctx, newTemplateHash, dbConfig.ID)) + } + }) + require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) + +} + // nolint: deadcode func ignoreError(toIgnore error, err error) error { if errors.Is(err, toIgnore) { From d42c31c4456dbf833068e2d4fe8defb0371ebe43 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 15 Jun 2023 13:50:48 +0000 Subject: [PATCH 008/160] fix manager tests --- pkg/manager/manager_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index ea0f3a0..a6ce9b9 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -106,7 +106,7 @@ func TestManagerInitializeTemplateDatabase(t *testing.T) { t.Fatalf("failed to initialize template database: %v", err) } - if template.Ready() { + if template.Ready(ctx) { t.Error("template database is marked as ready") } if template.TemplateHash != hash { @@ -151,7 +151,7 @@ func TestManagerInitializeTemplateDatabaseConcurrently(t *testing.T) { wg.Add(templateDBCount) for i := 0; i < templateDBCount; i++ { - go initTemplateDB(&wg, errs, m) + go initTemplateDB(ctx, &wg, errs, m) } wg.Wait() @@ -213,7 +213,7 @@ func TestManagerFinalizeTemplateDatabase(t *testing.T) { t.Fatalf("failed to finalize template database: %v", err) } - if !template.Ready() { + if !template.Ready(ctx) { t.Error("template database is flagged as not ready") } } @@ -299,7 +299,7 @@ func TestManagerGetTestDatabase(t 
*testing.T) { t.Fatalf("failed to get test database: %v", err) } - if !test.Ready() { + if !test.Ready(ctx) { t.Error("test database is flagged not ready") } @@ -363,11 +363,11 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { return } - if !test.Ready() { + if !test.Ready(ctx) { testCh <- errors.New("test database is flagged as not ready") return } - if !test.Dirty() { + if !test.Dirty(ctx) { testCh <- errors.New("test database is not flagged as dirty") } @@ -443,7 +443,7 @@ func TestManagerGetTestDatabaseConcurrently(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(&wg, errs, m) + go getTestDB(ctx, &wg, errs, m) } wg.Wait() @@ -499,7 +499,7 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(&wg, errs, m) + go getTestDB(ctx, &wg, errs, m) } if err := m.DiscardTemplateDatabase(ctx, hash); err != nil { @@ -561,7 +561,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(&wg, errs, m) + go getTestDB(ctx, &wg, errs, m) } if err := m.DiscardTemplateDatabase(ctx, hash); err != nil { From b573dff9c75f9476b0b8071c0e47dae7616e6a97 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 15 Jun 2023 15:27:26 +0000 Subject: [PATCH 009/160] refactor: remove wg reference from testing.go --- pkg/manager/manager.go | 10 ++++++---- pkg/manager/manager_test.go | 20 ++++++++++++++++---- pkg/manager/testing.go | 7 ++----- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 38f6d87..40e1901 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -178,8 +178,6 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( testDatabases: make([]*TestDatabase, 0), } - m.templates[hash] = template - reg = trace.StartRegion(ctx, "drop_and_create_db") if err := 
m.dropAndCreateDatabase(ctx, dbName, m.config.ManagerDatabaseConfig.Username, m.config.TemplateDatabaseTemplate); err != nil { delete(m.templates, hash) @@ -188,6 +186,8 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( } reg.End() + m.templates[hash] = template + return template, nil } @@ -268,7 +268,10 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (*T template.FlagAsReady(ctx) m.wg.Add(1) - go m.addTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) + go func() { + defer m.wg.Done() + m.addTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) + }() return template, nil } @@ -550,7 +553,6 @@ func (m *Manager) createNextTestDatabase(ctx context.Context, template *Template // Adds new test databases for a template, intended to be run asynchronously from other operations in a separate goroutine, using the manager's WaitGroup to synchronize for shutdown. // This function will lock `template` until all requested test DBs have been created and signal the WaitGroup about completion afterwards. 
func (m *Manager) addTestDatabasesInBackground(template *TemplateDatabase, count int) { - defer m.wg.Done() template.Lock() defer template.Unlock() diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index a6ce9b9..5996ded 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -151,7 +151,10 @@ func TestManagerInitializeTemplateDatabaseConcurrently(t *testing.T) { wg.Add(templateDBCount) for i := 0; i < templateDBCount; i++ { - go initTemplateDB(ctx, &wg, errs, m) + go func() { + defer wg.Done() + initTemplateDB(ctx, errs, m) + }() } wg.Wait() @@ -443,7 +446,10 @@ func TestManagerGetTestDatabaseConcurrently(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(ctx, &wg, errs, m) + go func() { + defer wg.Done() + getTestDB(ctx, errs, m) + }() } wg.Wait() @@ -499,7 +505,10 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(ctx, &wg, errs, m) + go func() { + defer wg.Done() + getTestDB(ctx, errs, m) + }() } if err := m.DiscardTemplateDatabase(ctx, hash); err != nil { @@ -561,7 +570,10 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { wg.Add(testDBCount) for i := 0; i < testDBCount; i++ { - go getTestDB(ctx, &wg, errs, m) + go func() { + defer wg.Done() + getTestDB(ctx, errs, m) + }() } if err := m.DiscardTemplateDatabase(ctx, hash); err != nil { diff --git a/pkg/manager/testing.go b/pkg/manager/testing.go index f79b4f9..14ecdac 100644 --- a/pkg/manager/testing.go +++ b/pkg/manager/testing.go @@ -4,7 +4,6 @@ import ( "context" "database/sql" "errors" - "sync" "testing" "time" ) @@ -31,8 +30,7 @@ func disconnectManager(t *testing.T, m *Manager) { } } -func initTemplateDB(ctx context.Context, wg *sync.WaitGroup, errs chan<- error, m *Manager) { - defer wg.Done() +func initTemplateDB(ctx context.Context, errs chan<- error, m *Manager) { template, err := 
m.InitializeTemplateDatabase(context.Background(), "hashinghash") if err != nil { @@ -140,8 +138,7 @@ func verifyTestDB(t *testing.T, test *TestDatabase) { } } -func getTestDB(ctx context.Context, wg *sync.WaitGroup, errs chan<- error, m *Manager) { - defer wg.Done() +func getTestDB(ctx context.Context, errs chan<- error, m *Manager) { db, err := m.GetTestDatabase(context.Background(), "hashinghash") if err != nil { From 359b2a83645229db9c20954df510a9b89d24d6c2 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 16 Jun 2023 08:51:37 +0000 Subject: [PATCH 010/160] add template config --- pkg/manager/template_database.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/manager/template_database.go b/pkg/manager/template_database.go index 39bfc4c..de56d0a 100644 --- a/pkg/manager/template_database.go +++ b/pkg/manager/template_database.go @@ -1,5 +1,15 @@ package manager +type TemplateConfig struct { + TemplateHash string `json:"templateHash"` + Config DatabaseConfig `json:"config"` + nextTestID int +} + +func (c TemplateConfig) IsEmpty() bool { + return c.TemplateHash == "" +} + type TemplateDatabase struct { Database `json:"database"` From 00a152f006cefbebea53cbdc9e05910dff96080a Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 16 Jun 2023 11:05:30 +0000 Subject: [PATCH 011/160] add pool GetReadyDB and AddTestDatabase --- pkg/manager/my/database.go | 107 +++++++++++++++++++++++++ pkg/manager/my/database_config.go | 41 ++++++++++ pkg/manager/my/pool.go | 117 ++++++++++++++++++++++++++++ pkg/manager/my/template_database.go | 17 ++++ pkg/manager/my/test_database.go | 7 ++ 5 files changed, 289 insertions(+) create mode 100644 pkg/manager/my/database.go create mode 100644 pkg/manager/my/database_config.go create mode 100644 pkg/manager/my/pool.go create mode 100644 pkg/manager/my/template_database.go create mode 100644 pkg/manager/my/test_database.go diff --git a/pkg/manager/my/database.go b/pkg/manager/my/database.go new file mode 100644 index 
0000000..3bd29c3 --- /dev/null +++ b/pkg/manager/my/database.go @@ -0,0 +1,107 @@ +package manager + +import ( + "errors" +) + +type databaseState int + +const ( + databaseStateInit databaseState = iota + databaseStateDiscarded databaseState = iota + databaseStateReady databaseState = iota +) + +var ErrDatabaseDiscarded = errors.New("ErrDatabaseDiscarded") + +type Database struct { + TemplateHash string `json:"templateHash"` + Config DatabaseConfig `json:"config"` +} + +// func (d *Database) State(ctx context.Context) databaseState { +// reg := trace.StartRegion(ctx, "db_get_state") +// defer reg.End() + +// d.RLock() +// defer d.RUnlock() + +// return d.state +// } + +// func (d *Database) Ready(ctx context.Context) bool { +// reg := trace.StartRegion(ctx, "db_check_ready") +// defer reg.End() + +// d.RLock() +// defer d.RUnlock() + +// return d.state == databaseStateReady +// } + +// func (d *Database) WaitUntilReady(ctx context.Context) error { +// reg := trace.StartRegion(ctx, "db_wait_ready") +// defer reg.End() + +// state := d.State(ctx) + +// if state == databaseStateReady { +// return nil +// } else if state == databaseStateDiscarded { +// return ErrDatabaseDiscarded +// } + +// for { +// select { +// case <-d.c: +// state := d.State(ctx) + +// if state == databaseStateReady { +// return nil +// } else if state == databaseStateDiscarded { +// return ErrDatabaseDiscarded +// } + +// case <-ctx.Done(): +// return ctx.Err() +// } +// } +// } + +// func (d *Database) FlagAsReady(ctx context.Context) { +// reg := trace.StartRegion(ctx, "db_flag_ready") +// defer reg.End() + +// state := d.State(ctx) +// if state != databaseStateInit { +// return +// } + +// d.Lock() +// defer d.Unlock() + +// d.state = databaseStateReady + +// if d.c != nil { +// close(d.c) +// } +// } + +// func (d *Database) FlagAsDiscarded(ctx context.Context) { +// reg := trace.StartRegion(ctx, "db_flag_discarded") +// defer reg.End() + +// state := d.State(ctx) +// if state != 
databaseStateInit { +// return +// } + +// d.Lock() +// defer d.Unlock() + +// d.state = databaseStateDiscarded + +// if d.c != nil { +// close(d.c) +// } +// } diff --git a/pkg/manager/my/database_config.go b/pkg/manager/my/database_config.go new file mode 100644 index 0000000..4c351ae --- /dev/null +++ b/pkg/manager/my/database_config.go @@ -0,0 +1,41 @@ +package manager + +import ( + "fmt" + "sort" + "strings" +) + +type DatabaseConfig struct { + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password"` + Database string `json:"database"` + AdditionalParams map[string]string `json:"additionalParams,omitempty"` // Optional additional connection parameters mapped into the connection string +} + +// Generates a connection string to be passed to sql.Open or equivalents, assuming Postgres syntax +func (c DatabaseConfig) ConnectionString() string { + var b strings.Builder + b.WriteString(fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", c.Host, c.Port, c.Username, c.Password, c.Database)) + + if _, ok := c.AdditionalParams["sslmode"]; !ok { + b.WriteString(" sslmode=disable") + } + + if len(c.AdditionalParams) > 0 { + params := make([]string, 0, len(c.AdditionalParams)) + for param := range c.AdditionalParams { + params = append(params, param) + } + + sort.Strings(params) + + for _, param := range params { + fmt.Fprintf(&b, " %s=%s", param, c.AdditionalParams[param]) + } + } + + return b.String() +} diff --git a/pkg/manager/my/pool.go b/pkg/manager/my/pool.go new file mode 100644 index 0000000..b452cb2 --- /dev/null +++ b/pkg/manager/my/pool.go @@ -0,0 +1,117 @@ +package manager + +import ( + "context" + "errors" + "fmt" + "sync" +) + +var ( + ErrPoolEmpty = errors.New("no database exists for this hash") + ErrPoolFull = errors.New("database pool is full") +) + +type DBPool struct { + ready map[string]*singleHashDBPool // map[hash] + dirty map[string]*singleHashDBPool // map[hash][db ID] + 
sync.RWMutex + + poolSize int +} + +type singleHashDBPool struct { + dbs []TestDatabase + index int + + sync.RWMutex +} + +func newSingleHashDBPool(poolSize int) *singleHashDBPool { + return &singleHashDBPool{ + dbs: make([]TestDatabase, poolSize), + index: -1, + } +} + +func (p *DBPool) GetReadyDB(ctx context.Context, hash string) (TestDatabase, error) { + var readyDBs *singleHashDBPool + { + // + // DBPool locked + p.Lock() + defer p.Unlock() + readyDBs = p.ready[hash] + if readyDBs == nil { + return TestDatabase{}, ErrPoolEmpty + } + } + + // + // singleHashDBPool locked + // + readyDBs.Lock() + defer readyDBs.Unlock() + + // if index is negative, there are no ready DBs + if readyDBs.index < 0 { + return TestDatabase{}, ErrPoolEmpty + } + + // pick a test database from the index + // and decrease the index value - this database is now 'dirty' + testDB := readyDBs.dbs[readyDBs.index] + readyDBs.index-- + + // add it to the collection of dirty DB + + return testDB, nil +} + +func (p *DBPool) AddTestDatabase(ctx context.Context, template TemplateConfig, dbNamePrefix string) (TestDatabase, error) { + var readyDBs *singleHashDBPool + { + // + // DBPool locked + p.Lock() + defer p.Unlock() + + readyDBs, ok := p.ready[template.TemplateHash] + if ok { + // if exists already, check if pool size is not exceeded + if readyDBs.index+1 >= p.poolSize { + return TestDatabase{}, ErrPoolFull + } + + } else { + // add newSingleHashDBPool if doesn't exist already + readyDBs = newSingleHashDBPool(p.poolSize) + p.ready[template.TemplateHash] = readyDBs + } + } + + // + // singleHashDBPool locked + // + readyDBs.Lock() + defer readyDBs.Unlock() + + // index points now to the DB to be added + readyDBs.index++ + + // prepare test database structure based on the template + newTestDB := TestDatabase{ + Database: Database{ + TemplateHash: template.TemplateHash, + Config: template.Config, + }, + ID: readyDBs.index, + } + dbName := fmt.Sprintf("%s%03d", dbNamePrefix, readyDBs.index) + 
newTestDB.Database.Config.Database = dbName + + // add new database to ready pool + readyDBs.dbs[readyDBs.index] = newTestDB + + return newTestDB, nil +} diff --git a/pkg/manager/my/template_database.go b/pkg/manager/my/template_database.go new file mode 100644 index 0000000..e0d491c --- /dev/null +++ b/pkg/manager/my/template_database.go @@ -0,0 +1,17 @@ +package manager + +type TemplateConfig struct { + TemplateHash string `json:"templateHash"` + Config DatabaseConfig `json:"config"` +} + +func (c TemplateConfig) IsEmpty() bool { + return c.TemplateHash == "" +} + +type TemplateDatabase struct { + Database `json:"database"` + + nextTestID int + testDatabases []*TestDatabase +} diff --git a/pkg/manager/my/test_database.go b/pkg/manager/my/test_database.go new file mode 100644 index 0000000..4cc602a --- /dev/null +++ b/pkg/manager/my/test_database.go @@ -0,0 +1,7 @@ +package manager + +type TestDatabase struct { + Database `json:"database"` + + ID int `json:"id"` +} From c4d0034e5a83537499a5ed2a774fbb950bee3f75 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 16 Jun 2023 18:47:13 +0000 Subject: [PATCH 012/160] implement using pool per hash --- pkg/manager/my/pool.go | 207 +++++++++++++++++++++++++++-------------- 1 file changed, 138 insertions(+), 69 deletions(-) diff --git a/pkg/manager/my/pool.go b/pkg/manager/my/pool.go index b452cb2..c28d21c 100644 --- a/pkg/manager/my/pool.go +++ b/pkg/manager/my/pool.go @@ -8,110 +8,179 @@ import ( ) var ( - ErrPoolEmpty = errors.New("no database exists for this hash") - ErrPoolFull = errors.New("database pool is full") + ErrPoolEmpty = errors.New("no database exists for this hash") + ErrPoolFull = errors.New("database pool is full") + ErrNotInPool = errors.New("database is not in the pool") + ErrNoDBReady = errors.New("no database is currently ready, perhaps you need to create one") + ErrInvalidIndex = errors.New("invalid index == poor implementation :(") ) type DBPool struct { - ready map[string]*singleHashDBPool // 
map[hash] - dirty map[string]*singleHashDBPool // map[hash][db ID] + pool map[string]*dbHashPool // map[hash] + ready map[string][]int // map[hash] + dirty map[string]map[int]bool // map[hash] sync.RWMutex - poolSize int + maxPoolSize int } -type singleHashDBPool struct { - dbs []TestDatabase - index int +func NewDBPool(maxPoolSize int) *DBPool { + return &DBPool{ + pool: make(map[string]*dbHashPool), + ready: make(map[string][]int), + dirty: make(map[string]map[int]bool), + maxPoolSize: maxPoolSize, + } +} + +type dbHashPool struct { + dbs []TestDatabase sync.RWMutex } -func newSingleHashDBPool(poolSize int) *singleHashDBPool { - return &singleHashDBPool{ - dbs: make([]TestDatabase, poolSize), - index: -1, +func newDBHashPool(maxPoolSize int) *dbHashPool { + return &dbHashPool{ + dbs: make([]TestDatabase, 0, maxPoolSize), } } func (p *DBPool) GetReadyDB(ctx context.Context, hash string) (TestDatabase, error) { - var readyDBs *singleHashDBPool + var hashDBs *dbHashPool + var index int + + // ! + // DBPool locked { - // - // DBPool locked p.Lock() defer p.Unlock() - readyDBs = p.ready[hash] - if readyDBs == nil { + + ready := p.ready[hash] + if len(ready) == 0 { + return TestDatabase{}, ErrNoDBReady + } + + // get and remove last 'ready' index + index = ready[len(ready)-1] + ready = ready[:len(ready)-1] + p.ready[hash] = ready + + // sanity check, should never happen + if index >= p.maxPoolSize { + return TestDatabase{}, ErrInvalidIndex + } + + hashDBs = p.pool[hash] + if hashDBs == nil { + // should not happen return TestDatabase{}, ErrPoolEmpty } - } - // - // singleHashDBPool locked - // - readyDBs.Lock() - defer readyDBs.Unlock() + // add the index to 'dirty' + dirty := p.dirty[hash] + if dirty == nil { + dirty = make(map[int]bool) + } + dirty[index] = true + p.dirty[hash] = dirty - // if index is negative, there are no ready DBs - if readyDBs.index < 0 { - return TestDatabase{}, ErrPoolEmpty + // ! 
+ // dbHashPool locked before unlocking DBPool + hashDBs.Lock() } + // DBPool unlocked + // ! + defer hashDBs.Unlock() - // pick a test database from the index - // and decrease the index value - this database is now 'dirty' - testDB := readyDBs.dbs[readyDBs.index] - readyDBs.index-- - - // add it to the collection of dirty DB + // pick a ready test database from the index + if len(hashDBs.dbs) <= index { + return TestDatabase{}, ErrInvalidIndex + } - return testDB, nil + return hashDBs.dbs[index], nil + // dbHashPool unlocked + // ! } func (p *DBPool) AddTestDatabase(ctx context.Context, template TemplateConfig, dbNamePrefix string) (TestDatabase, error) { - var readyDBs *singleHashDBPool + var newTestDB TestDatabase + hash := template.TemplateHash + + // ! + // DBPool locked { - // - // DBPool locked p.Lock() defer p.Unlock() - readyDBs, ok := p.ready[template.TemplateHash] - if ok { - // if exists already, check if pool size is not exceeded - if readyDBs.index+1 >= p.poolSize { - return TestDatabase{}, ErrPoolFull - } - - } else { - // add newSingleHashDBPool if doesn't exist already - readyDBs = newSingleHashDBPool(p.poolSize) - p.ready[template.TemplateHash] = readyDBs + hashDBs := p.pool[hash] + if hashDBs == nil { + hashDBs = newDBHashPool(p.maxPoolSize) + p.pool[hash] = hashDBs } - } - // - // singleHashDBPool locked - // - readyDBs.Lock() - defer readyDBs.Unlock() - - // index points now to the DB to be added - readyDBs.index++ - - // prepare test database structure based on the template - newTestDB := TestDatabase{ - Database: Database{ - TemplateHash: template.TemplateHash, - Config: template.Config, - }, - ID: readyDBs.index, - } - dbName := fmt.Sprintf("%s%03d", dbNamePrefix, readyDBs.index) - newTestDB.Database.Config.Database = dbName + // ! 
+ // dbHashPool locked + hashDBs.Lock() + defer hashDBs.Unlock() - // add new database to ready pool - readyDBs.dbs[readyDBs.index] = newTestDB + // get index of a next test DB - its ID + index := len(hashDBs.dbs) + if index >= p.maxPoolSize { + return TestDatabase{}, ErrPoolFull + } + + newTestDB = TestDatabase{ + Database: Database{ + TemplateHash: template.TemplateHash, + Config: template.Config, + }, + ID: index, + } + // db name has an ID in suffix + dbName := fmt.Sprintf("%s%03d", dbNamePrefix, index) + newTestDB.Database.Config.Database = dbName + + // add new test DB to the pool + hashDBs.dbs = append(hashDBs.dbs, newTestDB) + + // and add its index to 'ready' + ready := p.ready[hash] + if ready == nil { + ready = make([]int, 0, p.maxPoolSize) + } + ready = append(ready, index) + p.ready[hash] = ready + + hashDBs.Lock() + } + // dbHashPool unlocked + // ! + // DBPool unlocked + // ! return newTestDB, nil } + +func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { + + // ! + // DBPool locked + { + p.Lock() + defer p.Unlock() + + dirty := p.dirty[hash] + if len(dirty) == 0 { + return ErrNotInPool + } + + exists := dirty[id] + if !exists { + return ErrNotInPool + } + + // p.ready + + } + +} From 5ad627e1e3cd46a2587e5392ffccea2a6cc45054 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 16 Jun 2023 19:53:04 +0000 Subject: [PATCH 013/160] improve ready/dirty system Ready are fully initalized and clean (only when adding a new fresh DB). Dirty are no longer used, but need to be recreated. 
--- pkg/manager/my/pool.go | 184 ++++++++++++++++++++--------------------- 1 file changed, 92 insertions(+), 92 deletions(-) diff --git a/pkg/manager/my/pool.go b/pkg/manager/my/pool.go index c28d21c..fb2880c 100644 --- a/pkg/manager/my/pool.go +++ b/pkg/manager/my/pool.go @@ -12,31 +12,33 @@ var ( ErrPoolFull = errors.New("database pool is full") ErrNotInPool = errors.New("database is not in the pool") ErrNoDBReady = errors.New("no database is currently ready, perhaps you need to create one") - ErrInvalidIndex = errors.New("invalid index == poor implementation :(") + ErrInvalidIndex = errors.New("invalid database index (ID)") ) type DBPool struct { - pool map[string]*dbHashPool // map[hash] - ready map[string][]int // map[hash] - dirty map[string]map[int]bool // map[hash] + pool map[string]*dbHashPool // map[hash] + ready map[string]dbIDMap // map[hash], initalized DBs according to a template, ready to pick them up + dirty map[string]dbIDMap // map[hash], returned DBs, need to be initalized again to reuse them sync.RWMutex maxPoolSize int } +type dbIDMap map[int]bool // map[db ID] + func NewDBPool(maxPoolSize int) *DBPool { return &DBPool{ - pool: make(map[string]*dbHashPool), - ready: make(map[string][]int), - dirty: make(map[string]map[int]bool), + pool: make(map[string]*dbHashPool), + + ready: make(map[string]dbIDMap), + dirty: make(map[string]dbIDMap), + maxPoolSize: maxPoolSize, } } type dbHashPool struct { dbs []TestDatabase - - sync.RWMutex } func newDBHashPool(maxPoolSize int) *dbHashPool { @@ -45,90 +47,84 @@ func newDBHashPool(maxPoolSize int) *dbHashPool { } } -func (p *DBPool) GetReadyDB(ctx context.Context, hash string) (TestDatabase, error) { +func popFirstKey(idMap dbIDMap) int { + id := -1 + for key := range idMap { + id = key + break + } + delete(idMap, id) + return id +} + +func (p *DBPool) GetDB(ctx context.Context, hash string) (db TestDatabase, isDirty bool, err error) { var hashDBs *dbHashPool var index int // ! 
// DBPool locked - { - p.Lock() - defer p.Unlock() - - ready := p.ready[hash] - if len(ready) == 0 { - return TestDatabase{}, ErrNoDBReady - } + p.Lock() + defer p.Unlock() - // get and remove last 'ready' index - index = ready[len(ready)-1] - ready = ready[:len(ready)-1] + ready := p.ready[hash] + if len(ready) > 0 { + // if there are some ready to be used DB, just get one + index = popFirstKey(ready) p.ready[hash] = ready - - // sanity check, should never happen - if index >= p.maxPoolSize { - return TestDatabase{}, ErrInvalidIndex - } - - hashDBs = p.pool[hash] - if hashDBs == nil { - // should not happen - return TestDatabase{}, ErrPoolEmpty - } - - // add the index to 'dirty' + } else { + // if no DBs are ready, reuse the dirty ones dirty := p.dirty[hash] - if dirty == nil { - dirty = make(map[int]bool) + if len(dirty) == 0 { + return TestDatabase{}, false, ErrNoDBReady } - dirty[index] = true + + isDirty = true + index = popFirstKey(dirty) p.dirty[hash] = dirty + } - // ! - // dbHashPool locked before unlocking DBPool - hashDBs.Lock() + // sanity check, should never happen + if index < 0 || index >= p.maxPoolSize { + return TestDatabase{}, false, ErrInvalidIndex + } + + hashDBs = p.pool[hash] + if hashDBs == nil { + // should not happen + return TestDatabase{}, false, ErrPoolEmpty } - // DBPool unlocked - // ! - defer hashDBs.Unlock() // pick a ready test database from the index if len(hashDBs.dbs) <= index { - return TestDatabase{}, ErrInvalidIndex + return TestDatabase{}, false, ErrInvalidIndex } - return hashDBs.dbs[index], nil - // dbHashPool unlocked + return hashDBs.dbs[index], isDirty, nil + // DBPool unlocked // ! 
} -func (p *DBPool) AddTestDatabase(ctx context.Context, template TemplateConfig, dbNamePrefix string) (TestDatabase, error) { +func (p *DBPool) AddTestDatabase(ctx context.Context, template TemplateConfig, dbNamePrefix string, initFunc func(TestDatabase) error) (TestDatabase, error) { var newTestDB TestDatabase hash := template.TemplateHash - // ! - // DBPool locked - { - p.Lock() - defer p.Unlock() - - hashDBs := p.pool[hash] - if hashDBs == nil { - hashDBs = newDBHashPool(p.maxPoolSize) - p.pool[hash] = hashDBs - } + p.Lock() + defer p.Unlock() - // ! - // dbHashPool locked - hashDBs.Lock() - defer hashDBs.Unlock() + hashDBs := p.pool[hash] + if hashDBs == nil { + hashDBs = newDBHashPool(p.maxPoolSize) + p.pool[hash] = hashDBs + } - // get index of a next test DB - its ID - index := len(hashDBs.dbs) - if index >= p.maxPoolSize { - return TestDatabase{}, ErrPoolFull - } + // get index of a next test DB - its ID + index := len(hashDBs.dbs) + if index >= p.maxPoolSize { + return TestDatabase{}, ErrPoolFull + } + { + // initalization of a new DB newTestDB = TestDatabase{ Database: Database{ TemplateHash: template.TemplateHash, @@ -140,47 +136,51 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, template TemplateConfig, d dbName := fmt.Sprintf("%s%03d", dbNamePrefix, index) newTestDB.Database.Config.Database = dbName - // add new test DB to the pool - hashDBs.dbs = append(hashDBs.dbs, newTestDB) - - // and add its index to 'ready' - ready := p.ready[hash] - if ready == nil { - ready = make([]int, 0, p.maxPoolSize) + if err := initFunc(newTestDB); err != nil { + return TestDatabase{}, err } - ready = append(ready, index) - p.ready[hash] = ready + } - hashDBs.Lock() + // add new test DB to the pool + hashDBs.dbs = append(hashDBs.dbs, newTestDB) + + // and add its index to 'ready' + ready := p.ready[hash] + if ready == nil { + ready = make(dbIDMap) } - // dbHashPool unlocked - // ! - // DBPool unlocked - // ! 
+ + ready[index] = true + p.ready[hash] = ready return newTestDB, nil + // DBPool unlocked + // ! } func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { // ! // DBPool locked - { - p.Lock() - defer p.Unlock() + p.Lock() + defer p.Unlock() - dirty := p.dirty[hash] - if len(dirty) == 0 { - return ErrNotInPool - } + if id < 0 || id >= p.maxPoolSize { + return ErrInvalidIndex + } + dirty := p.dirty[hash] + // check if pool has been already returned + if dirty != nil && len(dirty) > 0 { exists := dirty[id] - if !exists { + if exists { return ErrNotInPool } - - // p.ready - } + // ok, it hasn't been returned yet + dirty[id] = true + p.dirty[hash] = dirty + + return nil } From fdd10848ac427a38487903901380b2218ee3fba6 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 16 Jun 2023 20:16:11 +0000 Subject: [PATCH 014/160] keep pool, ready and dirty in dbHashPool --- pkg/manager/my/pool.go | 190 ++++++++++++++++++++++++----------------- 1 file changed, 111 insertions(+), 79 deletions(-) diff --git a/pkg/manager/my/pool.go b/pkg/manager/my/pool.go index fb2880c..c1f2f2a 100644 --- a/pkg/manager/my/pool.go +++ b/pkg/manager/my/pool.go @@ -8,7 +8,7 @@ import ( ) var ( - ErrPoolEmpty = errors.New("no database exists for this hash") + ErrNoPool = errors.New("no database exists for this hash") ErrPoolFull = errors.New("database pool is full") ErrNotInPool = errors.New("database is not in the pool") ErrNoDBReady = errors.New("no database is currently ready, perhaps you need to create one") @@ -16,9 +16,7 @@ var ( ) type DBPool struct { - pool map[string]*dbHashPool // map[hash] - ready map[string]dbIDMap // map[hash], initalized DBs according to a template, ready to pick them up - dirty map[string]dbIDMap // map[hash], returned DBs, need to be initalized again to reuse them + pools map[string]*dbHashPool // map[hash] sync.RWMutex maxPoolSize int @@ -28,22 +26,25 @@ type dbIDMap map[int]bool // map[db ID] func NewDBPool(maxPoolSize int) *DBPool { 
return &DBPool{ - pool: make(map[string]*dbHashPool), - - ready: make(map[string]dbIDMap), - dirty: make(map[string]dbIDMap), + pools: make(map[string]*dbHashPool), maxPoolSize: maxPoolSize, } } type dbHashPool struct { - dbs []TestDatabase + dbs []TestDatabase + ready dbIDMap // initalized DBs according to a template, ready to pick them up + dirty dbIDMap // returned DBs, need to be initalized again to reuse them + + sync.RWMutex } func newDBHashPool(maxPoolSize int) *dbHashPool { return &dbHashPool{ - dbs: make([]TestDatabase, 0, maxPoolSize), + dbs: make([]TestDatabase, 0, maxPoolSize), + ready: make(dbIDMap), + dirty: make(dbIDMap), } } @@ -58,129 +59,160 @@ func popFirstKey(idMap dbIDMap) int { } func (p *DBPool) GetDB(ctx context.Context, hash string) (db TestDatabase, isDirty bool, err error) { - var hashDBs *dbHashPool - var index int + var pool *dbHashPool + + { + // ! + // DBPool locked + p.Lock() + defer p.Unlock() + + pool = p.pools[hash] + // DBPool unlocked + // ! + } + + if pool == nil { + // no such pool + err = ErrNoPool + return + } // ! 
- // DBPool locked - p.Lock() - defer p.Unlock() + // dbHashPool locked + pool.Lock() + defer pool.Unlock() - ready := p.ready[hash] - if len(ready) > 0 { + var index int + if len(pool.ready) > 0 { // if there are some ready to be used DB, just get one - index = popFirstKey(ready) - p.ready[hash] = ready + index = popFirstKey(pool.ready) } else { // if no DBs are ready, reuse the dirty ones - dirty := p.dirty[hash] - if len(dirty) == 0 { - return TestDatabase{}, false, ErrNoDBReady + if len(pool.dirty) == 0 { + err = ErrNoDBReady + return } isDirty = true - index = popFirstKey(dirty) - p.dirty[hash] = dirty + index = popFirstKey(pool.dirty) } // sanity check, should never happen if index < 0 || index >= p.maxPoolSize { - return TestDatabase{}, false, ErrInvalidIndex - } - - hashDBs = p.pool[hash] - if hashDBs == nil { - // should not happen - return TestDatabase{}, false, ErrPoolEmpty + err = ErrInvalidIndex + return } // pick a ready test database from the index - if len(hashDBs.dbs) <= index { - return TestDatabase{}, false, ErrInvalidIndex + if len(pool.dbs) <= index { + err = ErrInvalidIndex + return } - return hashDBs.dbs[index], isDirty, nil - // DBPool unlocked + return pool.dbs[index], isDirty, nil + // dbHashPool unlocked // ! + } func (p *DBPool) AddTestDatabase(ctx context.Context, template TemplateConfig, dbNamePrefix string, initFunc func(TestDatabase) error) (TestDatabase, error) { - var newTestDB TestDatabase + var pool *dbHashPool hash := template.TemplateHash - p.Lock() - defer p.Unlock() - - hashDBs := p.pool[hash] - if hashDBs == nil { - hashDBs = newDBHashPool(p.maxPoolSize) - p.pool[hash] = hashDBs + { + // ! + // DBPool locked + p.Lock() + defer p.Unlock() + + pool = p.pools[hash] + if pool == nil { + pool = newDBHashPool(p.maxPoolSize) + p.pools[hash] = pool + } + // DBPool unlocked + // ! } + // ! 
+ // dbHashPool locked + pool.Lock() + defer pool.Unlock() + // get index of a next test DB - its ID - index := len(hashDBs.dbs) + index := len(pool.dbs) if index >= p.maxPoolSize { return TestDatabase{}, ErrPoolFull } - { - // initalization of a new DB - newTestDB = TestDatabase{ - Database: Database{ - TemplateHash: template.TemplateHash, - Config: template.Config, - }, - ID: index, - } - // db name has an ID in suffix - dbName := fmt.Sprintf("%s%03d", dbNamePrefix, index) - newTestDB.Database.Config.Database = dbName + // initalization of a new DB + newTestDB := TestDatabase{ + Database: Database{ + TemplateHash: template.TemplateHash, + Config: template.Config, + }, + ID: index, + } + // db name has an ID in suffix + dbName := fmt.Sprintf("%s%03d", dbNamePrefix, index) + newTestDB.Database.Config.Database = dbName - if err := initFunc(newTestDB); err != nil { - return TestDatabase{}, err - } + if err := initFunc(newTestDB); err != nil { + return TestDatabase{}, err } // add new test DB to the pool - hashDBs.dbs = append(hashDBs.dbs, newTestDB) + pool.dbs = append(pool.dbs, newTestDB) // and add its index to 'ready' - ready := p.ready[hash] - if ready == nil { - ready = make(dbIDMap) - } - - ready[index] = true - p.ready[hash] = ready + pool.ready[index] = true return newTestDB, nil - // DBPool unlocked + // dbHashPool unlocked // ! } func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { + var pool *dbHashPool - // ! - // DBPool locked - p.Lock() - defer p.Unlock() + { + // ! + // DBPool locked + p.Lock() + defer p.Unlock() + + // needs to be checked inside locked region + // because we access maxPoolSize + if id < 0 || id >= p.maxPoolSize { + return ErrInvalidIndex + } - if id < 0 || id >= p.maxPoolSize { - return ErrInvalidIndex + pool = p.pools[hash] + // DBPool unlocked + // ! } - dirty := p.dirty[hash] + if pool == nil { + // no such pool + return ErrNoPool + } + + // ! 
+ // dbHashPool locked + pool.Lock() + defer pool.Unlock() + // check if pool has been already returned - if dirty != nil && len(dirty) > 0 { - exists := dirty[id] + if pool.dirty != nil && len(pool.dirty) > 0 { + exists := pool.dirty[id] if exists { return ErrNotInPool } } // ok, it hasn't been returned yet - dirty[id] = true - p.dirty[hash] = dirty + pool.dirty[id] = true return nil } From 6524cbcf03b2e2221fc247662e99b0706db180e4 Mon Sep 17 00:00:00 2001 From: anjankow Date: Mon, 19 Jun 2023 09:01:37 +0000 Subject: [PATCH 015/160] move pool to its own package --- pkg/db/database.go | 6 ++ pkg/{manager/my => db}/database_config.go | 2 +- pkg/{manager/my => db}/test_database.go | 2 +- pkg/manager/my/database.go | 107 ---------------------- pkg/manager/my/template_database.go | 17 ---- pkg/{manager/my => pool}/pool.go | 28 +++--- 6 files changed, 23 insertions(+), 139 deletions(-) create mode 100644 pkg/db/database.go rename pkg/{manager/my => db}/database_config.go (98%) rename pkg/{manager/my => db}/test_database.go (83%) delete mode 100644 pkg/manager/my/database.go delete mode 100644 pkg/manager/my/template_database.go rename pkg/{manager/my => pool}/pool.go (80%) diff --git a/pkg/db/database.go b/pkg/db/database.go new file mode 100644 index 0000000..268eefa --- /dev/null +++ b/pkg/db/database.go @@ -0,0 +1,6 @@ +package db + +type Database struct { + TemplateHash string `json:"templateHash"` + Config DatabaseConfig `json:"config"` +} diff --git a/pkg/manager/my/database_config.go b/pkg/db/database_config.go similarity index 98% rename from pkg/manager/my/database_config.go rename to pkg/db/database_config.go index 4c351ae..8a3ef7f 100644 --- a/pkg/manager/my/database_config.go +++ b/pkg/db/database_config.go @@ -1,4 +1,4 @@ -package manager +package db import ( "fmt" diff --git a/pkg/manager/my/test_database.go b/pkg/db/test_database.go similarity index 83% rename from pkg/manager/my/test_database.go rename to pkg/db/test_database.go index 4cc602a..0e71c7f 
100644 --- a/pkg/manager/my/test_database.go +++ b/pkg/db/test_database.go @@ -1,4 +1,4 @@ -package manager +package db type TestDatabase struct { Database `json:"database"` diff --git a/pkg/manager/my/database.go b/pkg/manager/my/database.go deleted file mode 100644 index 3bd29c3..0000000 --- a/pkg/manager/my/database.go +++ /dev/null @@ -1,107 +0,0 @@ -package manager - -import ( - "errors" -) - -type databaseState int - -const ( - databaseStateInit databaseState = iota - databaseStateDiscarded databaseState = iota - databaseStateReady databaseState = iota -) - -var ErrDatabaseDiscarded = errors.New("ErrDatabaseDiscarded") - -type Database struct { - TemplateHash string `json:"templateHash"` - Config DatabaseConfig `json:"config"` -} - -// func (d *Database) State(ctx context.Context) databaseState { -// reg := trace.StartRegion(ctx, "db_get_state") -// defer reg.End() - -// d.RLock() -// defer d.RUnlock() - -// return d.state -// } - -// func (d *Database) Ready(ctx context.Context) bool { -// reg := trace.StartRegion(ctx, "db_check_ready") -// defer reg.End() - -// d.RLock() -// defer d.RUnlock() - -// return d.state == databaseStateReady -// } - -// func (d *Database) WaitUntilReady(ctx context.Context) error { -// reg := trace.StartRegion(ctx, "db_wait_ready") -// defer reg.End() - -// state := d.State(ctx) - -// if state == databaseStateReady { -// return nil -// } else if state == databaseStateDiscarded { -// return ErrDatabaseDiscarded -// } - -// for { -// select { -// case <-d.c: -// state := d.State(ctx) - -// if state == databaseStateReady { -// return nil -// } else if state == databaseStateDiscarded { -// return ErrDatabaseDiscarded -// } - -// case <-ctx.Done(): -// return ctx.Err() -// } -// } -// } - -// func (d *Database) FlagAsReady(ctx context.Context) { -// reg := trace.StartRegion(ctx, "db_flag_ready") -// defer reg.End() - -// state := d.State(ctx) -// if state != databaseStateInit { -// return -// } - -// d.Lock() -// defer d.Unlock() - -// 
d.state = databaseStateReady - -// if d.c != nil { -// close(d.c) -// } -// } - -// func (d *Database) FlagAsDiscarded(ctx context.Context) { -// reg := trace.StartRegion(ctx, "db_flag_discarded") -// defer reg.End() - -// state := d.State(ctx) -// if state != databaseStateInit { -// return -// } - -// d.Lock() -// defer d.Unlock() - -// d.state = databaseStateDiscarded - -// if d.c != nil { -// close(d.c) -// } -// } diff --git a/pkg/manager/my/template_database.go b/pkg/manager/my/template_database.go deleted file mode 100644 index e0d491c..0000000 --- a/pkg/manager/my/template_database.go +++ /dev/null @@ -1,17 +0,0 @@ -package manager - -type TemplateConfig struct { - TemplateHash string `json:"templateHash"` - Config DatabaseConfig `json:"config"` -} - -func (c TemplateConfig) IsEmpty() bool { - return c.TemplateHash == "" -} - -type TemplateDatabase struct { - Database `json:"database"` - - nextTestID int - testDatabases []*TestDatabase -} diff --git a/pkg/manager/my/pool.go b/pkg/pool/pool.go similarity index 80% rename from pkg/manager/my/pool.go rename to pkg/pool/pool.go index c1f2f2a..0435b8d 100644 --- a/pkg/manager/my/pool.go +++ b/pkg/pool/pool.go @@ -1,18 +1,20 @@ -package manager +package pool import ( "context" "errors" "fmt" "sync" + + "github.com/allaboutapps/integresql/pkg/db" ) var ( - ErrNoPool = errors.New("no database exists for this hash") + ErrNoPool = errors.New("no db.Database exists for this hash") ErrPoolFull = errors.New("database pool is full") ErrNotInPool = errors.New("database is not in the pool") - ErrNoDBReady = errors.New("no database is currently ready, perhaps you need to create one") - ErrInvalidIndex = errors.New("invalid database index (ID)") + ErrNoDBReady = errors.New("no db.Database is currently ready, perhaps you need to create one") + ErrInvalidIndex = errors.New("invalid db.Database index (ID)") ) type DBPool struct { @@ -33,7 +35,7 @@ func NewDBPool(maxPoolSize int) *DBPool { } type dbHashPool struct { - dbs 
[]TestDatabase + dbs []db.TestDatabase ready dbIDMap // initalized DBs according to a template, ready to pick them up dirty dbIDMap // returned DBs, need to be initalized again to reuse them @@ -42,7 +44,7 @@ type dbHashPool struct { func newDBHashPool(maxPoolSize int) *dbHashPool { return &dbHashPool{ - dbs: make([]TestDatabase, 0, maxPoolSize), + dbs: make([]db.TestDatabase, 0, maxPoolSize), ready: make(dbIDMap), dirty: make(dbIDMap), } @@ -58,7 +60,7 @@ func popFirstKey(idMap dbIDMap) int { return id } -func (p *DBPool) GetDB(ctx context.Context, hash string) (db TestDatabase, isDirty bool, err error) { +func (p *DBPool) GetDB(ctx context.Context, hash string) (db db.TestDatabase, isDirty bool, err error) { var pool *dbHashPool { @@ -104,7 +106,7 @@ func (p *DBPool) GetDB(ctx context.Context, hash string) (db TestDatabase, isDir return } - // pick a ready test database from the index + // pick a ready test db.Database from the index if len(pool.dbs) <= index { err = ErrInvalidIndex return @@ -116,7 +118,7 @@ func (p *DBPool) GetDB(ctx context.Context, hash string) (db TestDatabase, isDir } -func (p *DBPool) AddTestDatabase(ctx context.Context, template TemplateConfig, dbNamePrefix string, initFunc func(TestDatabase) error) (TestDatabase, error) { +func (p *DBPool) AddTestDatabase(ctx context.Context, template db.Database, dbNamePrefix string, initFunc func(db.TestDatabase) error) (db.TestDatabase, error) { var pool *dbHashPool hash := template.TemplateHash @@ -143,12 +145,12 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, template TemplateConfig, d // get index of a next test DB - its ID index := len(pool.dbs) if index >= p.maxPoolSize { - return TestDatabase{}, ErrPoolFull + return db.TestDatabase{}, ErrPoolFull } // initalization of a new DB - newTestDB := TestDatabase{ - Database: Database{ + newTestDB := db.TestDatabase{ + Database: db.Database{ TemplateHash: template.TemplateHash, Config: template.Config, }, @@ -159,7 +161,7 @@ func (p *DBPool) 
AddTestDatabase(ctx context.Context, template TemplateConfig, d newTestDB.Database.Config.Database = dbName if err := initFunc(newTestDB); err != nil { - return TestDatabase{}, err + return db.TestDatabase{}, err } // add new test DB to the pool From 2d9fdacec2c641f9595d3ab72eb45dbc11abb8b4 Mon Sep 17 00:00:00 2001 From: anjankow Date: Mon, 19 Jun 2023 09:17:22 +0000 Subject: [PATCH 016/160] add template collection --- pkg/templates/template_collection.go | 53 ++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 pkg/templates/template_collection.go diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go new file mode 100644 index 0000000..184174d --- /dev/null +++ b/pkg/templates/template_collection.go @@ -0,0 +1,53 @@ +package templates + +import ( + "context" + "runtime/trace" + "sync" + + "github.com/allaboutapps/integresql/pkg/db" +) + +type Collection struct { + templates map[string]db.DatabaseConfig + templateMutex sync.RWMutex +} + +func NewCollection() *Collection { + return &Collection{ + templates: make(map[string]db.DatabaseConfig), + templateMutex: sync.RWMutex{}, + } +} + +func (tc *Collection) Push(ctx context.Context, hash string, template db.DatabaseConfig) (added bool) { + reg := trace.StartRegion(ctx, "get_template_lock") + defer reg.End() + + tc.templateMutex.Lock() + defer tc.templateMutex.Unlock() + + _, ok := tc.templates[hash] + if ok { + return false + } + + tc.templates[hash] = template + return true +} + +func (tc *Collection) Pop(ctx context.Context, hash string) db.DatabaseConfig { + reg := trace.StartRegion(ctx, "get_template_lock") + defer reg.End() + + tc.templateMutex.Lock() + defer tc.templateMutex.Unlock() + + template, ok := tc.templates[hash] + if !ok { + return db.DatabaseConfig{} + } + + delete(tc.templates, hash) + return template +} From 7dc9fdeb10276cb85ac4373d3c566338c6ab9bf6 Mon Sep 17 00:00:00 2001 From: anjankow Date: Mon, 19 Jun 2023 09:48:14 +0000 
Subject: [PATCH 017/160] add template collection --- pkg/templates/template_collection.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index 184174d..40d6acd 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -13,6 +13,8 @@ type Collection struct { templateMutex sync.RWMutex } +type Unlock func() + func NewCollection() *Collection { return &Collection{ templates: make(map[string]db.DatabaseConfig), @@ -20,20 +22,22 @@ func NewCollection() *Collection { } } -func (tc *Collection) Push(ctx context.Context, hash string, template db.DatabaseConfig) (added bool) { +func (tc *Collection) Push(ctx context.Context, hash string, template db.DatabaseConfig) (added bool, unlock Unlock) { reg := trace.StartRegion(ctx, "get_template_lock") - defer reg.End() - tc.templateMutex.Lock() - defer tc.templateMutex.Unlock() + + unlock = func() { + tc.templateMutex.Unlock() + reg.End() + } _, ok := tc.templates[hash] if ok { - return false + return false, unlock } tc.templates[hash] = template - return true + return true, unlock } func (tc *Collection) Pop(ctx context.Context, hash string) db.DatabaseConfig { @@ -51,3 +55,7 @@ func (tc *Collection) Pop(ctx context.Context, hash string) db.DatabaseConfig { delete(tc.templates, hash) return template } + +func (tc *Collection) RemoveUnsafe(ctx context.Context, hash string) { + delete(tc.templates, hash) +} From faab03f207499cf379734b52e7df4f6e9d0717ec Mon Sep 17 00:00:00 2001 From: anjankow Date: Mon, 19 Jun 2023 09:49:22 +0000 Subject: [PATCH 018/160] use template collection for template init --- pkg/manager/manager.go | 71 +++++++++++++++++-------------------- pkg/manager/manager_test.go | 22 +++++------- pkg/manager/testing.go | 8 +++-- 3 files changed, 46 insertions(+), 55 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 40e1901..ab34098 100644 
--- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -10,6 +10,9 @@ import ( "sync" "time" + "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/pool" + "github.com/allaboutapps/integresql/pkg/templates" "github.com/lib/pq" ) @@ -26,14 +29,19 @@ type Manager struct { templates map[string]*TemplateDatabase templateMutex sync.RWMutex wg sync.WaitGroup + + templatesX *templates.Collection + pool *pool.DBPool } func New(config ManagerConfig) *Manager { m := &Manager{ - config: config, - db: nil, - templates: map[string]*TemplateDatabase{}, - wg: sync.WaitGroup{}, + config: config, + db: nil, + templates: map[string]*TemplateDatabase{}, + wg: sync.WaitGroup{}, + templatesX: templates.NewCollection(), + pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), } if len(m.config.TestDatabaseOwner) == 0 { @@ -138,57 +146,42 @@ func (m *Manager) Initialize(ctx context.Context) error { return nil } -func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (*TemplateDatabase, error) { +func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.Database, error) { ctx, task := trace.NewTask(ctx, "initialize_template_db") defer task.End() if !m.Ready() { - return nil, ErrManagerNotReady + return db.Database{}, ErrManagerNotReady } - reg := trace.StartRegion(ctx, "get_template_lock") - m.templateMutex.Lock() - defer m.templateMutex.Unlock() - reg.End() - - _, ok := m.templates[hash] - - if ok { - // fmt.Println("initialized!", ok) - return nil, ErrTemplateAlreadyInitialized + dbName := fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) + templateConfig := db.DatabaseConfig{ + Host: m.config.ManagerDatabaseConfig.Host, + Port: m.config.ManagerDatabaseConfig.Port, + Username: m.config.ManagerDatabaseConfig.Username, + Password: m.config.ManagerDatabaseConfig.Password, + Database: dbName, } - // fmt.Println("initializing...", ok) + added, 
unlock := m.templatesX.Push(ctx, hash, templateConfig) + defer unlock() - dbName := fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) - template := &TemplateDatabase{ - Database: Database{ - TemplateHash: hash, - Config: DatabaseConfig{ - Host: m.config.ManagerDatabaseConfig.Host, - Port: m.config.ManagerDatabaseConfig.Port, - Username: m.config.ManagerDatabaseConfig.Username, - Password: m.config.ManagerDatabaseConfig.Password, - Database: dbName, - }, - state: databaseStateInit, - c: make(chan struct{}), - }, - nextTestID: 0, - testDatabases: make([]*TestDatabase, 0), + if !added { + return db.Database{}, ErrTemplateAlreadyInitialized } - reg = trace.StartRegion(ctx, "drop_and_create_db") + reg := trace.StartRegion(ctx, "drop_and_create_db") if err := m.dropAndCreateDatabase(ctx, dbName, m.config.ManagerDatabaseConfig.Username, m.config.TemplateDatabaseTemplate); err != nil { - delete(m.templates, hash) + m.templatesX.RemoveUnsafe(ctx, hash) - return nil, err + return db.Database{}, err } reg.End() - m.templates[hash] = template - - return template, nil + return db.Database{ + TemplateHash: hash, + Config: templateConfig, + }, nil } func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error { diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 5996ded..7155b30 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/lib/pq" + "github.com/stretchr/testify/assert" ) func TestManagerConnect(t *testing.T) { @@ -106,12 +107,7 @@ func TestManagerInitializeTemplateDatabase(t *testing.T) { t.Fatalf("failed to initialize template database: %v", err) } - if template.Ready(ctx) { - t.Error("template database is marked as ready") - } - if template.TemplateHash != hash { - t.Errorf("template has not set correctly, got %q, want %q", template.TemplateHash, hash) - } + assert.Equal(t, hash, template.TemplateHash) } func 
TestManagerInitializeTemplateDatabaseTimeout(t *testing.T) { @@ -211,14 +207,14 @@ func TestManagerFinalizeTemplateDatabase(t *testing.T) { populateTemplateDB(t, template) - template, err = m.FinalizeTemplateDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } + // template, err = m.FinalizeTemplateDatabase(ctx, hash) + // if err != nil { + // t.Fatalf("failed to finalize template database: %v", err) + // } - if !template.Ready(ctx) { - t.Error("template database is flagged as not ready") - } + // if !template.Ready(ctx) { + // t.Error("template database is flagged as not ready") + // } } func TestManagerFinalizeUntrackedTemplateDatabaseIsNotPossible(t *testing.T) { diff --git a/pkg/manager/testing.go b/pkg/manager/testing.go index 14ecdac..c540bac 100644 --- a/pkg/manager/testing.go +++ b/pkg/manager/testing.go @@ -6,6 +6,8 @@ import ( "errors" "testing" "time" + + "github.com/allaboutapps/integresql/pkg/db" ) func testManagerFromEnv() *Manager { @@ -38,15 +40,15 @@ func initTemplateDB(ctx context.Context, errs chan<- error, m *Manager) { return } - if template.Ready(ctx) { - errs <- errors.New("template database is marked as ready") + if template.TemplateHash != "hashinghash" { + errs <- errors.New("template database is invalid") return } errs <- nil } -func populateTemplateDB(t *testing.T, template *TemplateDatabase) { +func populateTemplateDB(t *testing.T, template db.Database) { t.Helper() db, err := sql.Open("postgres", template.Config.ConnectionString()) From e316b5ea049cfc3a573d74f3bc919a0e8923db06 Mon Sep 17 00:00:00 2001 From: anjankow Date: Mon, 19 Jun 2023 10:27:11 +0000 Subject: [PATCH 019/160] use template collection for discard template --- pkg/manager/manager.go | 37 +++---- pkg/manager/template_collection copy.go | 126 ++++++++++++++++++++++++ pkg/templates/template_collection.go | 37 +++++-- 3 files changed, 170 insertions(+), 30 deletions(-) create mode 100644 pkg/manager/template_collection 
copy.go diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index ab34098..97cd1e6 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -154,7 +154,7 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( return db.Database{}, ErrManagerNotReady } - dbName := fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) + dbName := m.makeTemplateDatabaseName(hash) templateConfig := db.DatabaseConfig{ Host: m.config.ManagerDatabaseConfig.Host, Port: m.config.ManagerDatabaseConfig.Port, @@ -193,15 +193,13 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro return ErrManagerNotReady } - reg := trace.StartRegion(ctx, "get_template_lock") - m.templateMutex.Lock() - defer m.templateMutex.Unlock() - reg.End() - - template, ok := m.templates[hash] + template, found, unlock := m.templatesX.Pop(ctx, hash) + defer unlock() + dbName := template.Config.Database - if !ok { - dbName := fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) + if !found { + // even if a template is not found in the collection, it might still exist in the DB + dbName = m.makeTemplateDatabaseName(hash) exists, err := m.checkDatabaseExists(ctx, dbName) if err != nil { return err @@ -212,18 +210,7 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro } } - // discard any still waiting dbs. 
- template.FlagAsDiscarded(ctx) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - if err := template.WaitUntilReady(ctx); err != nil { - cancel() - } - cancel() - - delete(m.templates, hash) - - return nil + return m.dropDatabase(ctx, dbName) } func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (*TemplateDatabase, error) { @@ -557,3 +544,11 @@ func (m *Manager) addTestDatabasesInBackground(template *TemplateDatabase, count _, _ = m.createNextTestDatabase(ctx, template) } } + +func (m *Manager) makeTemplateDatabaseName(hash string) string { + return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) +} + +func (m *Manager) makeTestDatabasePrefix(hash string) string { + return fmt.Sprintf("%s_%s_%s_", m.config.DatabasePrefix, m.config.TestDatabasePrefix, hash) +} diff --git a/pkg/manager/template_collection copy.go b/pkg/manager/template_collection copy.go new file mode 100644 index 0000000..cfa1e42 --- /dev/null +++ b/pkg/manager/template_collection copy.go @@ -0,0 +1,126 @@ +package manager + +import ( + "context" + "runtime/trace" + "sync" +) + +type TemplateCollection struct { + templates map[string]TemplateConfig + templateMutex sync.RWMutex +} + +type Unlock func() + +func NewTemplateCollection() *TemplateCollection { + return &TemplateCollection{ + templates: make(map[string]TemplateConfig), + templateMutex: sync.RWMutex{}, + } +} + +func (tc *TemplateCollection) Add(ctx context.Context, hash string, template TemplateConfig) (added bool) { + reg := trace.StartRegion(ctx, "get_template_lock") + defer reg.End() + + tc.templateMutex.Lock() + defer tc.templateMutex.Unlock() + + _, ok := tc.templates[hash] + if ok { + return false + } + + tc.templates[hash] = template + return true +} + +func (tc *TemplateCollection) Pop(ctx context.Context, hash string) TemplateConfig { + reg := trace.StartRegion(ctx, "get_template_lock") + defer reg.End() + + tc.templateMutex.Lock() + 
defer tc.templateMutex.Unlock() + + template, ok := tc.templates[hash] + if !ok { + return TemplateConfig{} + } + + delete(tc.templates, hash) + return template +} + +func (tc *TemplateCollection) Remove(ctx context.Context, hash string) { + reg := trace.StartRegion(ctx, "get_template_lock") + defer reg.End() + + tc.templateMutex.Lock() + defer tc.templateMutex.Unlock() + + delete(tc.templates, hash) +} + +// func (tc *TemplateCollection) Get1(ctx context.Context, hash string) (*TemplateConfig, Unlock) { +// reg := trace.StartRegion(ctx, "get_template_lock") +// tc.templateMutex.Lock() + +// unlockFunc := func() { +// tc.templateMutex.Unlock() +// reg.End() +// } + +// template, ok := tc.templates[hash] +// if !ok { +// return nil, unlockFunc +// } +// return template, unlockFunc +// } + +// func (tc *TemplateCollection) GetForReading1(ctx context.Context, hash string) (*TemplateConfig, Unlock) { +// reg := trace.StartRegion(ctx, "get_template_lock") +// tc.templateMutex.RLock() + +// unlockFunc := func() { +// tc.templateMutex.RUnlock() +// reg.End() +// } + +// template, ok := tc.templates[hash] +// if !ok { +// return nil, unlockFunc +// } +// return template, unlockFunc +// } + +// func (tc *TemplateCollection) GetAllForReading1() (map[string]*TemplateConfig, Unlock) { +// tc.templateMutex.RLock() + +// unlockFunc := func() { +// tc.templateMutex.RUnlock() +// } + +// return tc.templates, unlockFunc +// } + +// func (tc *TemplateCollection) Reset1() { +// tc.templateMutex.Lock() +// defer tc.templateMutex.Unlock() + +// tc.templates = map[string]*TemplateConfig{} +// } + +// func (tc *TemplateCollection) RemoveUnsafe1(hash string) { +// // tc.templateMutex.Lock() +// // defer tc.templateMutex.Unlock() + +// delete(tc.templates, hash) +// } + +// func (tc *TemplateCollection) AddUnsafe1(hash string, template *TemplateConfig) { +// // tc.templateMutex.Lock() +// // defer tc.templateMutex.Unlock() + +// tc.templates[hash] = template +// } diff --git 
a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index 40d6acd..2338e29 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -9,7 +9,7 @@ import ( ) type Collection struct { - templates map[string]db.DatabaseConfig + templates map[string]db.Database templateMutex sync.RWMutex } @@ -17,7 +17,7 @@ type Unlock func() func NewCollection() *Collection { return &Collection{ - templates: make(map[string]db.DatabaseConfig), + templates: make(map[string]db.Database), templateMutex: sync.RWMutex{}, } } @@ -36,24 +36,43 @@ func (tc *Collection) Push(ctx context.Context, hash string, template db.Databas return false, unlock } - tc.templates[hash] = template + tc.templates[hash] = db.Database{TemplateHash: hash, Config: template} return true, unlock } -func (tc *Collection) Pop(ctx context.Context, hash string) db.DatabaseConfig { +func (tc *Collection) Pop(ctx context.Context, hash string) (template db.Database, found bool, unlock Unlock) { reg := trace.StartRegion(ctx, "get_template_lock") - defer reg.End() - tc.templateMutex.Lock() - defer tc.templateMutex.Unlock() + + unlock = func() { + tc.templateMutex.Unlock() + reg.End() + } template, ok := tc.templates[hash] if !ok { - return db.DatabaseConfig{} + return db.Database{}, false, unlock } delete(tc.templates, hash) - return template + return template, true, unlock +} + +func (tc *Collection) Get(ctx context.Context, hash string) (template db.Database, found bool, unlock Unlock) { + reg := trace.StartRegion(ctx, "get_template_lock") + tc.templateMutex.Lock() + + unlock = func() { + tc.templateMutex.Unlock() + reg.End() + } + + template, ok := tc.templates[hash] + if !ok { + return db.Database{}, false, unlock + } + + return template, true, unlock } func (tc *Collection) RemoveUnsafe(ctx context.Context, hash string) { From bb51cba4639fa0be9039fd1427428b18d77b7906 Mon Sep 17 00:00:00 2001 From: anjankow Date: Mon, 19 Jun 2023 13:40:13 +0000 Subject: 
[PATCH 020/160] implement template state management --- pkg/templates/template.go | 81 +++++++++++++++++++++++++++ pkg/templates/template_collection.go | 36 ++++++------ pkg/templates/template_test.go | 82 ++++++++++++++++++++++++++++ 3 files changed, 180 insertions(+), 19 deletions(-) create mode 100644 pkg/templates/template.go create mode 100644 pkg/templates/template_test.go diff --git a/pkg/templates/template.go b/pkg/templates/template.go new file mode 100644 index 0000000..ec621d2 --- /dev/null +++ b/pkg/templates/template.go @@ -0,0 +1,81 @@ +package templates + +import ( + "context" + "sync" + "time" + + "github.com/allaboutapps/integresql/pkg/db" +) + +type TemplateState int32 + +const ( + TemplateStateInit TemplateState = iota + TemplateStateDiscarded + TemplateStateReady +) + +type Template struct { + db.Database + state TemplateState + + cond *sync.Cond + lock sync.RWMutex +} + +func NewTemplate(database db.Database) *Template { + t := &Template{ + Database: database, + state: TemplateStateInit, + } + t.cond = sync.NewCond(&t.lock) + + return t +} + +func (t *Template) GetState(ctx context.Context) TemplateState { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.state +} + +func (t *Template) SetState(ctx context.Context, newState TemplateState) { + if t.GetState(ctx) == newState { + return + } + + t.lock.Lock() + defer t.lock.Unlock() + t.state = newState + + t.cond.Broadcast() +} + +func (t *Template) WaitUntilReady(ctx context.Context, timeout time.Duration) (exitState TemplateState) { + currentState := t.GetState(ctx) + if currentState == TemplateStateReady { + return + } + + cctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + newStateChan := make(chan TemplateState, 1) + go func() { + t.cond.L.Lock() + defer t.cond.L.Unlock() + t.cond.Wait() + + newStateChan <- t.state + }() + + select { + case state := <-newStateChan: + return state + case <-cctx.Done(): + // timeout means that there were no state changes in the 
meantime + return currentState + } +} diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index 2338e29..4421c35 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -9,25 +9,25 @@ import ( ) type Collection struct { - templates map[string]db.Database - templateMutex sync.RWMutex + templates map[string]*Template + collMutex sync.RWMutex } type Unlock func() func NewCollection() *Collection { return &Collection{ - templates: make(map[string]db.Database), - templateMutex: sync.RWMutex{}, + templates: make(map[string]*Template), + collMutex: sync.RWMutex{}, } } func (tc *Collection) Push(ctx context.Context, hash string, template db.DatabaseConfig) (added bool, unlock Unlock) { reg := trace.StartRegion(ctx, "get_template_lock") - tc.templateMutex.Lock() + tc.collMutex.Lock() unlock = func() { - tc.templateMutex.Unlock() + tc.collMutex.Unlock() reg.End() } @@ -36,43 +36,41 @@ func (tc *Collection) Push(ctx context.Context, hash string, template db.Databas return false, unlock } - tc.templates[hash] = db.Database{TemplateHash: hash, Config: template} + tc.templates[hash] = NewTemplate(db.Database{TemplateHash: hash, Config: template}) return true, unlock } -func (tc *Collection) Pop(ctx context.Context, hash string) (template db.Database, found bool, unlock Unlock) { +func (tc *Collection) Pop(ctx context.Context, hash string) (template *Template, found bool, unlock Unlock) { reg := trace.StartRegion(ctx, "get_template_lock") - tc.templateMutex.Lock() + tc.collMutex.Lock() unlock = func() { - tc.templateMutex.Unlock() + tc.collMutex.Unlock() reg.End() } template, ok := tc.templates[hash] if !ok { - return db.Database{}, false, unlock + return nil, false, unlock } delete(tc.templates, hash) return template, true, unlock } -func (tc *Collection) Get(ctx context.Context, hash string) (template db.Database, found bool, unlock Unlock) { +func (tc *Collection) Get(ctx context.Context, hash string) 
(template *Template, found bool) { reg := trace.StartRegion(ctx, "get_template_lock") - tc.templateMutex.Lock() + defer reg.End() - unlock = func() { - tc.templateMutex.Unlock() - reg.End() - } + tc.collMutex.Lock() + defer tc.collMutex.Unlock() template, ok := tc.templates[hash] if !ok { - return db.Database{}, false, unlock + return nil, false } - return template, true, unlock + return template, true } func (tc *Collection) RemoveUnsafe(ctx context.Context, hash string) { diff --git a/pkg/templates/template_test.go b/pkg/templates/template_test.go new file mode 100644 index 0000000..5802932 --- /dev/null +++ b/pkg/templates/template_test.go @@ -0,0 +1,82 @@ +package templates_test + +import ( + "context" + "errors" + "sync" + "testing" + "time" + + "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/templates" + "github.com/stretchr/testify/assert" +) + +func TestTemplateGetSetState(t *testing.T) { + ctx := context.Background() + + t1 := templates.NewTemplate(db.Database{TemplateHash: "123"}) + state := t1.GetState(ctx) + assert.Equal(t, templates.TemplateStateInit, state) + + t1.SetState(ctx, templates.TemplateStateReady) + state = t1.GetState(ctx) + assert.Equal(t, templates.TemplateStateReady, state) + + t1.SetState(ctx, templates.TemplateStateDiscarded) + state = t1.GetState(ctx) + assert.Equal(t, templates.TemplateStateDiscarded, state) +} + +func TestTemplateWaitForReady(t *testing.T) { + ctx := context.Background() + goroutineNum := 10 + + // initalize a new template, not ready yet + t1 := templates.NewTemplate(db.Database{TemplateHash: "123"}) + state := t1.GetState(ctx) + assert.Equal(t, templates.TemplateStateInit, state) + + var wg sync.WaitGroup + errsChan := make(chan error, 2*goroutineNum) + + // these goroutines should get ready state after waiting long enough + for i := 0; i < goroutineNum; i++ { + wg.Add(1) + go func() { + defer wg.Done() + timeout := 1 * time.Second + state := 
t1.WaitUntilReady(ctx, timeout) + if state != templates.TemplateStateReady { + errsChan <- errors.New("expected ready, but is not") + } + }() + } + + // these goroutines should run into timeout + for i := 0; i < goroutineNum; i++ { + wg.Add(1) + go func() { + defer wg.Done() + timeout := 3 * time.Millisecond + state := t1.WaitUntilReady(ctx, timeout) + if state != templates.TemplateStateInit { + errsChan <- errors.New("expected state init, but is not") + } + }() + } + + // now set state + time.Sleep(5 * time.Millisecond) + t1.SetState(ctx, templates.TemplateStateReady) + + wg.Wait() + close(errsChan) + + if len(errsChan) > 0 { + for err := range errsChan { + t.Error(err) + } + t.Fail() + } +} From ba998102e0247608f2f2fc15a515de679e7c2538 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 20 Jun 2023 07:58:27 +0000 Subject: [PATCH 021/160] adapt get and finalize template --- pkg/manager/manager.go | 257 +++++++++--------------- pkg/manager/manager_config.go | 4 + pkg/manager/manager_test.go | 22 +- pkg/manager/template_collection copy.go | 126 ------------ pkg/manager/testing.go | 19 +- pkg/pool/pool.go | 54 ++--- pkg/templates/template.go | 14 +- pkg/templates/template_collection.go | 13 +- 8 files changed, 139 insertions(+), 370 deletions(-) delete mode 100644 pkg/manager/template_collection copy.go diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 97cd1e6..82bfb2d 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "runtime/trace" - "sort" "sync" "time" @@ -20,6 +19,7 @@ var ( ErrManagerNotReady = errors.New("manager not ready") ErrTemplateAlreadyInitialized = errors.New("template is already initialized") ErrTemplateNotFound = errors.New("template not found") + ErrInvalidTemplateState = errors.New("unexpected template state") ErrTestNotFound = errors.New("test database not found") ) @@ -30,6 +30,7 @@ type Manager struct { templateMutex sync.RWMutex wg sync.WaitGroup + closeChan chan bool 
templatesX *templates.Collection pool *pool.DBPool } @@ -87,6 +88,8 @@ func (m *Manager) Disconnect(ctx context.Context, ignoreCloseError bool) error { return errors.New("manager is not connected") } + m.closeChan <- true + c := make(chan struct{}) go func() { defer close(c) @@ -164,6 +167,7 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( } added, unlock := m.templatesX.Push(ctx, hash, templateConfig) + // unlock template collection only after the template is actually initalized in the DB defer unlock() if !added { @@ -193,8 +197,7 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro return ErrManagerNotReady } - template, found, unlock := m.templatesX.Pop(ctx, hash) - defer unlock() + template, found := m.templatesX.Pop(ctx, hash) dbName := template.Config.Database if !found { @@ -208,101 +211,83 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro if !exists { return ErrTemplateNotFound } + } else { + template.SetState(ctx, templates.TemplateStateDiscarded) } return m.dropDatabase(ctx, dbName) } -func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (*TemplateDatabase, error) { +func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db.Database, error) { ctx, task := trace.NewTask(ctx, "finalize_template_db") defer task.End() if !m.Ready() { - return nil, ErrManagerNotReady + return db.Database{}, ErrManagerNotReady } - reg := trace.StartRegion(ctx, "get_template_lock") - m.templateMutex.Lock() - defer m.templateMutex.Unlock() - reg.End() - - template, ok := m.templates[hash] - - // We don't allow finalizing NEVER initialized database by integresql! 
- if !ok { - return nil, ErrTemplateNotFound + template, found := m.templatesX.Get(ctx, hash) + if !found { + return db.Database{}, ErrTemplateNotFound } - state := template.State(ctx) + state := template.GetState(ctx) // early bailout if we are already ready (multiple calls) - if state == databaseStateReady { - return template, nil + if state == templates.TemplateStateReady { + return template.Database, nil } // Disallow transition from discarded to ready - if state == databaseStateDiscarded { - return nil, ErrDatabaseDiscarded + if state == templates.TemplateStateDiscarded { + return db.Database{}, ErrDatabaseDiscarded } - template.FlagAsReady(ctx) + template.SetState(ctx, templates.TemplateStateReady) - m.wg.Add(1) - go func() { - defer m.wg.Done() - m.addTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) - }() + m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) - return template, nil + return template.Database, nil } -func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (*TestDatabase, error) { +func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) { ctx, task := trace.NewTask(ctx, "get_test_db") defer task.End() if !m.Ready() { - return nil, ErrManagerNotReady + return db.TestDatabase{}, ErrManagerNotReady } - reg := trace.StartRegion(ctx, "get_template_lock") - m.templateMutex.RLock() - reg.End() - template, ok := m.templates[hash] - m.templateMutex.RUnlock() - - if !ok { - return nil, ErrTemplateNotFound + template, found := m.templatesX.Get(ctx, hash) + if !found { + return db.TestDatabase{}, ErrTemplateNotFound } - if err := template.WaitUntilReady(ctx); err != nil { - return nil, err + // if the template has been discarded/not initalized yet, + // no DB should be returned, even if already in the pool + state := template.WaitUntilReady(ctx, m.config.TestDatabaseWaitTimeout) + if state != templates.TemplateStateReady { + return db.TestDatabase{}, 
ErrInvalidTemplateState } - template.Lock() - defer template.Unlock() - - var testDB *TestDatabase - for _, db := range template.testDatabases { - if db.ReadyForTest(ctx) { - testDB = db - break + testDB, dirty, err := m.pool.GetDB(ctx, template.TemplateHash) + if err != nil { + if !errors.Is(err, pool.ErrNoDBReady) { + // internal error occurred, return directly + return db.TestDatabase{}, err } - } - if testDB == nil { - var err error - testDB, err = m.createNextTestDatabase(ctx, template) - if err != nil { - return nil, err - } + // no DB is ready, we can try to add a new DB is pool is not full + return m.createTestDatabaseFromTemplate(ctx, template) } - testDB.FlagAsDirty(ctx) - - m.wg.Add(1) - go m.addTestDatabasesInBackground(template, 1) + // if no error occurred, a testDB has been found + if !dirty { + return testDB, nil + } - return testDB, nil + // clean it, if it's dirty, before returning it to the user + return m.cleanTestDatabase(ctx, testDB, m.makeTemplateDatabaseName(testDB.TemplateHash)) } func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error { @@ -310,63 +295,18 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e return ErrManagerNotReady } - m.templateMutex.RLock() - template, ok := m.templates[hash] - m.templateMutex.RUnlock() - - if !ok { + // check if the template exists and is 'ready' + template, found := m.templatesX.Get(ctx, hash) + if !found { return ErrTemplateNotFound } - if err := template.WaitUntilReady(ctx); err != nil { - return err + if template.WaitUntilReady(ctx, m.config.TestDatabaseWaitTimeout) != templates.TemplateStateReady { + return ErrInvalidTemplateState } - template.Lock() - defer template.Unlock() - - found := false - for _, db := range template.testDatabases { - if db.ID == id { - found = true - db.FlagAsClean(ctx) - break - } - } - - if !found { - dbName := fmt.Sprintf("%s_%s_%s_%03d", m.config.DatabasePrefix, m.config.TestDatabasePrefix, hash, id) - exists, 
err := m.checkDatabaseExists(ctx, dbName) - if err != nil { - return err - } - - if !exists { - return ErrTestNotFound - } - - db := &TestDatabase{ - Database: Database{ - TemplateHash: hash, - Config: DatabaseConfig{ - Host: m.config.ManagerDatabaseConfig.Host, - Port: m.config.ManagerDatabaseConfig.Port, - Username: m.config.TestDatabaseOwner, - Password: m.config.TestDatabaseOwnerPassword, - Database: dbName, - }, - state: databaseStateReady, - c: make(chan struct{}), - }, - ID: id, - dirty: false, - } - - template.testDatabases = append(template.testDatabases, db) - sort.Sort(ByID(template.testDatabases)) - } - - return nil + // template is ready, we can return the testDB to the pool + return m.pool.ReturnTestDatabase(ctx, hash, id) } func (m *Manager) ClearTrackedTestDatabases(hash string) error { @@ -476,72 +416,57 @@ func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owne return m.createDatabase(ctx, dbName, owner, template) } -// Creates a new test database for the template and increments the next ID. -// ! ATTENTION: this function assumes `template` has already been LOCKED by its caller and will NOT synchronize access again ! -// The newly created database object is returned as well as added to the template's DB list automatically. 
-func (m *Manager) createNextTestDatabase(ctx context.Context, template *TemplateDatabase) (*TestDatabase, error) { - dbName := fmt.Sprintf("%s_%s_%s_%03d", m.config.DatabasePrefix, m.config.TestDatabasePrefix, template.TemplateHash, template.nextTestID) - - if err := m.dropAndCreateDatabase(ctx, dbName, m.config.TestDatabaseOwner, template.Config.Database); err != nil { - return nil, err - } - - testDB := &TestDatabase{ - Database: Database{ - TemplateHash: template.TemplateHash, - Config: DatabaseConfig{ - Host: m.config.ManagerDatabaseConfig.Host, - Port: m.config.ManagerDatabaseConfig.Port, - Username: m.config.TestDatabaseOwner, - Password: m.config.TestDatabaseOwnerPassword, - Database: dbName, - }, - state: databaseStateReady, - c: make(chan struct{}), - }, - ID: template.nextTestID, - dirty: false, - } - - template.testDatabases = append(template.testDatabases, testDB) - template.nextTestID++ - - if template.nextTestID > m.config.TestDatabaseMaxPoolSize { - i := 0 - for idx, db := range template.testDatabases { - if db.Dirty(ctx) { - i = idx - break - } - } +// cleanTestDatabase recreates a dirty DB obtained from the pool. +// It is created according to the given template. 
+func (m *Manager) cleanTestDatabase(ctx context.Context, testDB db.TestDatabase, templateDBName string) (db.TestDatabase, error) { + if err := m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateDBName); err != nil { + return db.TestDatabase{}, err + } - if err := m.dropDatabase(ctx, template.testDatabases[i].Config.Database); err != nil { - return nil, err - } + return testDB, nil +} - // Delete while preserving order, avoiding memory leaks due to points in accordance to: https://github.com/golang/go/wiki/SliceTricks - if i < len(template.testDatabases)-1 { - copy(template.testDatabases[i:], template.testDatabases[i+1:]) - } - template.testDatabases[len(template.testDatabases)-1] = nil - template.testDatabases = template.testDatabases[:len(template.testDatabases)-1] +// createTestDatabaseFromTemplate adds a new test database in the pool (increasing its size) basing on the given template. +// It waits until the template is ready. +func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template *templates.Template) (db.TestDatabase, error) { + if template.WaitUntilReady(ctx, m.config.TestDatabaseWaitTimeout) != templates.TemplateStateReady { + // if the state changed in the meantime, return + return db.TestDatabase{}, ErrInvalidTemplateState + } + + dbNamePrefix := m.makeTestDatabasePrefix(template.TemplateHash) + testDB, err := m.pool.AddTestDatabase(ctx, template.Database, dbNamePrefix, func(testDB db.TestDatabase) error { + return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, template.Config.Database) + }) + + if err != nil { + return db.TestDatabase{}, err } return testDB, nil } // Adds new test databases for a template, intended to be run asynchronously from other operations in a separate goroutine, using the manager's WaitGroup to synchronize for shutdown. 
-// This function will lock `template` until all requested test DBs have been created and signal the WaitGroup about completion afterwards. -func (m *Manager) addTestDatabasesInBackground(template *TemplateDatabase, count int) { +func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Template, count int) { - template.Lock() - defer template.Unlock() + ctx, cancel := context.WithCancel(context.Background()) + + m.wg.Add(1) + go func() { + defer m.wg.Done() + defer cancel() - ctx := context.Background() + for i := 0; i < count; i++ { + // TODO log error somewhere instead of silently swallowing it? + _, _ = m.createTestDatabaseFromTemplate(ctx, template) + } + }() - for i := 0; i < count; i++ { - // TODO log error somewhere instead of silently swallowing it? - _, _ = m.createNextTestDatabase(ctx, template) + select { + case <-m.closeChan: + // manager was requested to stop + cancel() + case <-ctx.Done(): } } diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index e1da06e..c00a7f5 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -1,6 +1,8 @@ package manager import ( + "time" + "github.com/allaboutapps/integresql/pkg/util" ) @@ -15,6 +17,7 @@ type ManagerConfig struct { TestDatabaseOwnerPassword string TestDatabaseInitialPoolSize int TestDatabaseMaxPoolSize int + TestDatabaseWaitTimeout time.Duration } func DefaultManagerConfigFromEnv() ManagerConfig { @@ -51,5 +54,6 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), + TestDatabaseWaitTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_READY_WAIT_TIMEOUT", 1000)), } } diff --git a/pkg/manager/manager_test.go 
b/pkg/manager/manager_test.go index 7155b30..d745417 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -3,7 +3,6 @@ package manager import ( "context" "database/sql" - "errors" "fmt" "sync" "testing" @@ -298,10 +297,6 @@ func TestManagerGetTestDatabase(t *testing.T) { t.Fatalf("failed to get test database: %v", err) } - if !test.Ready(ctx) { - t.Error("test database is flagged not ready") - } - verifyTestDB(t, test) } @@ -356,21 +351,8 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { testCh := make(chan error, 1) go func() { - test, err := m.GetTestDatabase(ctx, hash) - if err != nil { - testCh <- err - return - } - - if !test.Ready(ctx) { - testCh <- errors.New("test database is flagged as not ready") - return - } - if !test.Dirty(ctx) { - testCh <- errors.New("test database is not flagged as dirty") - } - - testCh <- nil + _, err := m.GetTestDatabase(ctx, hash) + testCh <- err }() populateTemplateDB(t, template) diff --git a/pkg/manager/template_collection copy.go b/pkg/manager/template_collection copy.go deleted file mode 100644 index cfa1e42..0000000 --- a/pkg/manager/template_collection copy.go +++ /dev/null @@ -1,126 +0,0 @@ -package manager - -import ( - "context" - "runtime/trace" - "sync" -) - -type TemplateCollection struct { - templates map[string]TemplateConfig - templateMutex sync.RWMutex -} - -type Unlock func() - -func NewTemplateCollection() *TemplateCollection { - return &TemplateCollection{ - templates: make(map[string]TemplateConfig), - templateMutex: sync.RWMutex{}, - } -} - -func (tc *TemplateCollection) Add(ctx context.Context, hash string, template TemplateConfig) (added bool) { - reg := trace.StartRegion(ctx, "get_template_lock") - defer reg.End() - - tc.templateMutex.Lock() - defer tc.templateMutex.Unlock() - - _, ok := tc.templates[hash] - if ok { - return false - } - - tc.templates[hash] = template - return true -} - -func (tc *TemplateCollection) Pop(ctx context.Context, hash 
string) TemplateConfig { - reg := trace.StartRegion(ctx, "get_template_lock") - defer reg.End() - - tc.templateMutex.Lock() - defer tc.templateMutex.Unlock() - - template, ok := tc.templates[hash] - if !ok { - return TemplateConfig{} - } - - delete(tc.templates, hash) - return template -} - -func (tc *TemplateCollection) Remove(ctx context.Context, hash string) { - reg := trace.StartRegion(ctx, "get_template_lock") - defer reg.End() - - tc.templateMutex.Lock() - defer tc.templateMutex.Unlock() - - delete(tc.templates, hash) -} - -// func (tc *TemplateCollection) Get1(ctx context.Context, hash string) (*TemplateConfig, Unlock) { -// reg := trace.StartRegion(ctx, "get_template_lock") -// tc.templateMutex.Lock() - -// unlockFunc := func() { -// tc.templateMutex.Unlock() -// reg.End() -// } - -// template, ok := tc.templates[hash] -// if !ok { -// return nil, unlockFunc -// } -// return template, unlockFunc -// } - -// func (tc *TemplateCollection) GetForReading1(ctx context.Context, hash string) (*TemplateConfig, Unlock) { -// reg := trace.StartRegion(ctx, "get_template_lock") -// tc.templateMutex.RLock() - -// unlockFunc := func() { -// tc.templateMutex.RUnlock() -// reg.End() -// } - -// template, ok := tc.templates[hash] -// if !ok { -// return nil, unlockFunc -// } -// return template, unlockFunc -// } - -// func (tc *TemplateCollection) GetAllForReading1() (map[string]*TemplateConfig, Unlock) { -// tc.templateMutex.RLock() - -// unlockFunc := func() { -// tc.templateMutex.RUnlock() -// } - -// return tc.templates, unlockFunc -// } - -// func (tc *TemplateCollection) Reset1() { -// tc.templateMutex.Lock() -// defer tc.templateMutex.Unlock() - -// tc.templates = map[string]*TemplateConfig{} -// } - -// func (tc *TemplateCollection) RemoveUnsafe1(hash string) { -// // tc.templateMutex.Lock() -// // defer tc.templateMutex.Unlock() - -// delete(tc.templates, hash) -// } - -// func (tc *TemplateCollection) AddUnsafe1(hash string, template *TemplateConfig) { -// // 
tc.templateMutex.Lock() -// // defer tc.templateMutex.Unlock() - -// tc.templates[hash] = template -// } diff --git a/pkg/manager/testing.go b/pkg/manager/testing.go index c540bac..b79e0c8 100644 --- a/pkg/manager/testing.go +++ b/pkg/manager/testing.go @@ -106,7 +106,7 @@ func populateTemplateDB(t *testing.T, template db.Database) { } } -func verifyTestDB(t *testing.T, test *TestDatabase) { +func verifyTestDB(t *testing.T, test db.TestDatabase) { t.Helper() db, err := sql.Open("postgres", test.Config.ConnectionString()) @@ -142,19 +142,6 @@ func verifyTestDB(t *testing.T, test *TestDatabase) { func getTestDB(ctx context.Context, errs chan<- error, m *Manager) { - db, err := m.GetTestDatabase(context.Background(), "hashinghash") - if err != nil { - errs <- err - return - } - - if !db.Ready(ctx) { - errs <- errors.New("test database is marked as not ready") - return - } - if !db.Dirty(ctx) { - errs <- errors.New("test database is not marked as dirty") - } - - errs <- nil + _, err := m.GetTestDatabase(context.Background(), "hashinghash") + errs <- err } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 0435b8d..9303f1f 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -10,16 +10,16 @@ import ( ) var ( - ErrNoPool = errors.New("no db.Database exists for this hash") + ErrUnknownHash = errors.New("no db.Database exists for this hash") ErrPoolFull = errors.New("database pool is full") - ErrNotInPool = errors.New("database is not in the pool") + ErrUnknownID = errors.New("database is not in the pool") ErrNoDBReady = errors.New("no db.Database is currently ready, perhaps you need to create one") ErrInvalidIndex = errors.New("invalid db.Database index (ID)") ) type DBPool struct { pools map[string]*dbHashPool // map[hash] - sync.RWMutex + mutex sync.RWMutex maxPoolSize int } @@ -66,23 +66,23 @@ func (p *DBPool) GetDB(ctx context.Context, hash string) (db db.TestDatabase, is { // ! 
// DBPool locked - p.Lock() - defer p.Unlock() + p.mutex.Lock() + defer p.mutex.Unlock() pool = p.pools[hash] // DBPool unlocked // ! - } - if pool == nil { - // no such pool - err = ErrNoPool - return - } + if pool == nil { + // no such pool + err = ErrUnknownHash + return + } - // ! - // dbHashPool locked - pool.Lock() + // ! + // dbHashPool locked + pool.Lock() + } defer pool.Unlock() var index int @@ -125,8 +125,8 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, template db.Database, dbNa { // ! // DBPool locked - p.Lock() - defer p.Unlock() + p.mutex.Lock() + defer p.mutex.Unlock() pool = p.pools[hash] if pool == nil { @@ -181,8 +181,8 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er { // ! // DBPool locked - p.Lock() - defer p.Unlock() + p.mutex.Lock() + defer p.mutex.Unlock() // needs to be checked inside locked region // because we access maxPoolSize @@ -193,23 +193,23 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er pool = p.pools[hash] // DBPool unlocked // ! - } - if pool == nil { - // no such pool - return ErrNoPool - } + if pool == nil { + // no such pool + return ErrUnknownHash + } - // ! - // dbHashPool locked - pool.Lock() + // ! 
+ // dbHashPool locked + pool.Lock() + } defer pool.Unlock() // check if pool has been already returned if pool.dirty != nil && len(pool.dirty) > 0 { exists := pool.dirty[id] if exists { - return ErrNotInPool + return ErrUnknownID } } diff --git a/pkg/templates/template.go b/pkg/templates/template.go index ec621d2..45e42da 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -20,8 +20,8 @@ type Template struct { db.Database state TemplateState - cond *sync.Cond - lock sync.RWMutex + cond *sync.Cond + mutex sync.RWMutex } func NewTemplate(database db.Database) *Template { @@ -29,14 +29,14 @@ func NewTemplate(database db.Database) *Template { Database: database, state: TemplateStateInit, } - t.cond = sync.NewCond(&t.lock) + t.cond = sync.NewCond(&t.mutex) return t } func (t *Template) GetState(ctx context.Context) TemplateState { - t.lock.RLock() - defer t.lock.RUnlock() + t.mutex.RLock() + defer t.mutex.RUnlock() return t.state } @@ -46,8 +46,8 @@ func (t *Template) SetState(ctx context.Context, newState TemplateState) { return } - t.lock.Lock() - defer t.lock.Unlock() + t.mutex.Lock() + defer t.mutex.Unlock() t.state = newState t.cond.Broadcast() diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index 4421c35..71854d5 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -40,22 +40,19 @@ func (tc *Collection) Push(ctx context.Context, hash string, template db.Databas return true, unlock } -func (tc *Collection) Pop(ctx context.Context, hash string) (template *Template, found bool, unlock Unlock) { +func (tc *Collection) Pop(ctx context.Context, hash string) (template *Template, found bool) { reg := trace.StartRegion(ctx, "get_template_lock") + defer reg.End() tc.collMutex.Lock() - - unlock = func() { - tc.collMutex.Unlock() - reg.End() - } + defer tc.collMutex.Unlock() template, ok := tc.templates[hash] if !ok { - return nil, false, unlock + return nil, false } 
delete(tc.templates, hash) - return template, true, unlock + return template, true } func (tc *Collection) Get(ctx context.Context, hash string) (template *Template, found bool) { From 173ae53a31e6fdedd191127e9a391e6406ce5166 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 20 Jun 2023 11:42:07 +0000 Subject: [PATCH 022/160] adapt reset all templates --- internal/api/admin/admin.go | 3 +- pkg/manager/manager.go | 59 +++++------------------- pkg/manager/manager_test.go | 2 +- pkg/pool/pool.go | 68 ++++++++++++++++++++++++++-- pkg/templates/template_collection.go | 18 +++++++- 5 files changed, 95 insertions(+), 55 deletions(-) diff --git a/internal/api/admin/admin.go b/internal/api/admin/admin.go index bb6a68b..3bf1662 100644 --- a/internal/api/admin/admin.go +++ b/internal/api/admin/admin.go @@ -9,7 +9,8 @@ import ( func deleteResetAllTemplates(s *api.Server) echo.HandlerFunc { return func(c echo.Context) error { - if err := s.Manager.ResetAllTracking(); err != nil { + ctx := c.Request().Context() + if err := s.Manager.ResetAllTracking(ctx); err != nil { return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 82bfb2d..5872b5a 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -7,7 +7,6 @@ import ( "fmt" "runtime/trace" "sync" - "time" "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/pool" @@ -309,66 +308,32 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e return m.pool.ReturnTestDatabase(ctx, hash, id) } -func (m *Manager) ClearTrackedTestDatabases(hash string) error { +func (m *Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) error { if !m.Ready() { return ErrManagerNotReady } - m.templateMutex.RLock() - template, ok := m.templates[hash] - m.templateMutex.RUnlock() - - if !ok { - return ErrTemplateNotFound - } - - ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) - if err := template.WaitUntilReady(ctx); err != nil { - cancel() - return err + removeFunc := func(testDB db.TestDatabase) error { + return m.dropDatabase(ctx, testDB.Config.Database) } - cancel() - - template.Lock() - defer template.Unlock() - - for i := range template.testDatabases { - template.testDatabases[i] = nil - } - - template.testDatabases = make([]*TestDatabase, 0) - template.nextTestID = 0 - return nil + return m.pool.RemoveAllWithHash(ctx, hash, removeFunc) } -func (m *Manager) ResetAllTracking() error { +func (m *Manager) ResetAllTracking(ctx context.Context) error { if !m.Ready() { return ErrManagerNotReady } - m.templateMutex.Lock() - defer m.templateMutex.Unlock() + // remove all templates to disallow any new test DB creation + m.templatesX.RemoveAll(ctx) - for hash := range m.templates { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - if err := m.templates[hash].WaitUntilReady(ctx); err != nil { - cancel() - continue - } - cancel() - - m.templates[hash].Lock() - for i := range m.templates[hash].testDatabases { - m.templates[hash].testDatabases[i] = nil - } - m.templates[hash].Unlock() - - delete(m.templates, hash) - // m.templates[hash] = nil + removeFunc := func(testDB db.TestDatabase) error { + return m.dropDatabase(ctx, testDB.Config.Database) + } + if err := m.pool.RemoveAll(ctx, removeFunc); err != nil { + return err } - - m.templates = map[string]*TemplateDatabase{} return nil } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index d745417..ae90154 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -840,7 +840,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { originalID := test.ID - if err := m.ClearTrackedTestDatabases(hash); err != nil { + if err := m.ClearTrackedTestDatabases(ctx, hash); err != nil { t.Fatalf("failed to clear tracked test databases: %v", err) } diff --git a/pkg/pool/pool.go 
b/pkg/pool/pool.go index 9303f1f..de7cd30 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -70,8 +70,6 @@ func (p *DBPool) GetDB(ctx context.Context, hash string) (db db.TestDatabase, is defer p.mutex.Unlock() pool = p.pools[hash] - // DBPool unlocked - // ! if pool == nil { // no such pool @@ -82,6 +80,9 @@ func (p *DBPool) GetDB(ctx context.Context, hash string) (db db.TestDatabase, is // ! // dbHashPool locked pool.Lock() + + // DBPool unlocked + // ! } defer pool.Unlock() @@ -191,8 +192,6 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er } pool = p.pools[hash] - // DBPool unlocked - // ! if pool == nil { // no such pool @@ -202,6 +201,9 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // ! // dbHashPool locked pool.Lock() + + // DBPool unlocked + // ! } defer pool.Unlock() @@ -218,3 +220,61 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er return nil } + +func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc func(db.TestDatabase) error) error { + + // ! + // DBPool locked + p.mutex.Lock() + defer p.mutex.Unlock() + + pool := p.pools[hash] + + if pool == nil { + // no such pool + return ErrUnknownHash + } + + return p.removeAllFromPool(pool, removeFunc) + // DBPool unlocked + // ! +} + +func (p *DBPool) removeAllFromPool(pool *dbHashPool, removeFunc func(db.TestDatabase) error) error { + pool.Lock() + defer pool.Unlock() + + // remove from back to be able to repeat operation in case of error + for id := len(pool.dbs) - 1; id >= 0; id-- { + db := pool.dbs[id] + + if err := removeFunc(db); err != nil { + return err + } + + pool.dbs = pool.dbs[:len(pool.dbs)-1] + delete(pool.dirty, id) + delete(pool.ready, id) + } + + return nil +} + +func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) error) error { + // ! 
+ // DBPool locked + p.mutex.Lock() + defer p.mutex.Unlock() + + for hash, pool := range p.pools { + if err := p.removeAllFromPool(pool, removeFunc); err != nil { + return err + } + + delete(p.pools, hash) + } + + return nil + // DBPool unlocked + // ! +} diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index 71854d5..fe87a0d 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -59,8 +59,8 @@ func (tc *Collection) Get(ctx context.Context, hash string) (template *Template, reg := trace.StartRegion(ctx, "get_template_lock") defer reg.End() - tc.collMutex.Lock() - defer tc.collMutex.Unlock() + tc.collMutex.RLock() + defer tc.collMutex.RUnlock() template, ok := tc.templates[hash] if !ok { @@ -73,3 +73,17 @@ func (tc *Collection) Get(ctx context.Context, hash string) (template *Template, func (tc *Collection) RemoveUnsafe(ctx context.Context, hash string) { delete(tc.templates, hash) } + +func (tc *Collection) RemoveAll(ctx context.Context) { + reg := trace.StartRegion(ctx, "get_template_lock") + defer reg.End() + + tc.collMutex.Lock() + defer tc.collMutex.Unlock() + + for hash, template := range tc.templates { + template.SetState(ctx, TemplateStateDiscarded) + + delete(tc.templates, hash) + } +} From 80703127181c60209a0625aab7e9f334aab7b93b Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 20 Jun 2023 11:49:09 +0000 Subject: [PATCH 023/160] remove old implementation remainings --- internal/api/templates/templates.go | 5 +- pkg/{manager => db}/database_config_test.go | 2 +- pkg/manager/database.go | 115 -------------------- pkg/manager/database_config.go | 41 ------- pkg/manager/manager.go | 13 +-- pkg/manager/manager_config.go | 5 +- pkg/manager/manager_test.go | 3 +- pkg/manager/template_database.go | 18 --- pkg/manager/test_database.go | 42 ------- tests/testclient/client.go | 3 +- 10 files changed, 16 insertions(+), 231 deletions(-) rename pkg/{manager => db}/database_config_test.go 
(99%) delete mode 100644 pkg/manager/database.go delete mode 100644 pkg/manager/database_config.go delete mode 100644 pkg/manager/template_database.go delete mode 100644 pkg/manager/test_database.go diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 3dacac5..1412c0b 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -8,6 +8,7 @@ import ( "github.com/allaboutapps/integresql/internal/api" "github.com/allaboutapps/integresql/pkg/manager" + "github.com/allaboutapps/integresql/pkg/pool" "github.com/labstack/echo/v4" ) @@ -104,7 +105,7 @@ func getTestDatabase(s *api.Server) echo.HandlerFunc { return echo.ErrServiceUnavailable case manager.ErrTemplateNotFound: return echo.NewHTTPError(http.StatusNotFound, "template not found") - case manager.ErrDatabaseDiscarded: + case manager.ErrTemplateDiscarded: return echo.NewHTTPError(http.StatusGone, "template was just discarded") default: return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) @@ -132,7 +133,7 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return echo.ErrServiceUnavailable case manager.ErrTemplateNotFound: return echo.NewHTTPError(http.StatusNotFound, "template not found") - case manager.ErrTestNotFound: + case pool.ErrUnknownID: return echo.NewHTTPError(http.StatusNotFound, "test database not found") default: return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) diff --git a/pkg/manager/database_config_test.go b/pkg/db/database_config_test.go similarity index 99% rename from pkg/manager/database_config_test.go rename to pkg/db/database_config_test.go index 5d6c4fc..b81b209 100644 --- a/pkg/manager/database_config_test.go +++ b/pkg/db/database_config_test.go @@ -1,4 +1,4 @@ -package manager +package db import ( "encoding/json" diff --git a/pkg/manager/database.go b/pkg/manager/database.go deleted file mode 100644 index 33faf6b..0000000 --- 
a/pkg/manager/database.go +++ /dev/null @@ -1,115 +0,0 @@ -package manager - -import ( - "context" - "errors" - "runtime/trace" - "sync" -) - -type databaseState int - -const ( - databaseStateInit databaseState = iota - databaseStateDiscarded databaseState = iota - databaseStateReady databaseState = iota -) - -var ErrDatabaseDiscarded = errors.New("ErrDatabaseDiscarded") - -type Database struct { - sync.RWMutex `json:"-"` - - TemplateHash string `json:"templateHash"` - Config DatabaseConfig `json:"config"` - - state databaseState - c chan struct{} -} - -func (d *Database) State(ctx context.Context) databaseState { - reg := trace.StartRegion(ctx, "db_get_state") - defer reg.End() - - d.RLock() - defer d.RUnlock() - - return d.state -} - -func (d *Database) Ready(ctx context.Context) bool { - reg := trace.StartRegion(ctx, "db_check_ready") - defer reg.End() - - d.RLock() - defer d.RUnlock() - - return d.state == databaseStateReady -} - -func (d *Database) WaitUntilReady(ctx context.Context) error { - reg := trace.StartRegion(ctx, "db_wait_ready") - defer reg.End() - - state := d.State(ctx) - - if state == databaseStateReady { - return nil - } else if state == databaseStateDiscarded { - return ErrDatabaseDiscarded - } - - for { - select { - case <-d.c: - state := d.State(ctx) - - if state == databaseStateReady { - return nil - } else if state == databaseStateDiscarded { - return ErrDatabaseDiscarded - } - - case <-ctx.Done(): - return ctx.Err() - } - } -} - -func (d *Database) FlagAsReady(ctx context.Context) { - reg := trace.StartRegion(ctx, "db_flag_ready") - defer reg.End() - - state := d.State(ctx) - if state != databaseStateInit { - return - } - - d.Lock() - defer d.Unlock() - - d.state = databaseStateReady - - if d.c != nil { - close(d.c) - } -} - -func (d *Database) FlagAsDiscarded(ctx context.Context) { - reg := trace.StartRegion(ctx, "db_flag_discarded") - defer reg.End() - - state := d.State(ctx) - if state != databaseStateInit { - return - } - - d.Lock() - 
defer d.Unlock() - - d.state = databaseStateDiscarded - - if d.c != nil { - close(d.c) - } -} diff --git a/pkg/manager/database_config.go b/pkg/manager/database_config.go deleted file mode 100644 index 4c351ae..0000000 --- a/pkg/manager/database_config.go +++ /dev/null @@ -1,41 +0,0 @@ -package manager - -import ( - "fmt" - "sort" - "strings" -) - -type DatabaseConfig struct { - Host string `json:"host"` - Port int `json:"port"` - Username string `json:"username"` - Password string `json:"password"` - Database string `json:"database"` - AdditionalParams map[string]string `json:"additionalParams,omitempty"` // Optional additional connection parameters mapped into the connection string -} - -// Generates a connection string to be passed to sql.Open or equivalents, assuming Postgres syntax -func (c DatabaseConfig) ConnectionString() string { - var b strings.Builder - b.WriteString(fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s", c.Host, c.Port, c.Username, c.Password, c.Database)) - - if _, ok := c.AdditionalParams["sslmode"]; !ok { - b.WriteString(" sslmode=disable") - } - - if len(c.AdditionalParams) > 0 { - params := make([]string, 0, len(c.AdditionalParams)) - for param := range c.AdditionalParams { - params = append(params, param) - } - - sort.Strings(params) - - for _, param := range params { - fmt.Fprintf(&b, " %s=%s", param, c.AdditionalParams[param]) - } - } - - return b.String() -} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 5872b5a..9694c10 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -19,15 +19,13 @@ var ( ErrTemplateAlreadyInitialized = errors.New("template is already initialized") ErrTemplateNotFound = errors.New("template not found") ErrInvalidTemplateState = errors.New("unexpected template state") - ErrTestNotFound = errors.New("test database not found") + ErrTemplateDiscarded = errors.New("template is discarded, can't be used") ) type Manager struct { - config ManagerConfig - db *sql.DB - 
templates map[string]*TemplateDatabase - templateMutex sync.RWMutex - wg sync.WaitGroup + config ManagerConfig + db *sql.DB + wg sync.WaitGroup closeChan chan bool templatesX *templates.Collection @@ -38,7 +36,6 @@ func New(config ManagerConfig) *Manager { m := &Manager{ config: config, db: nil, - templates: map[string]*TemplateDatabase{}, wg: sync.WaitGroup{}, templatesX: templates.NewCollection(), pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), @@ -239,7 +236,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db // Disallow transition from discarded to ready if state == templates.TemplateStateDiscarded { - return db.Database{}, ErrDatabaseDiscarded + return db.Database{}, ErrTemplateDiscarded } template.SetState(ctx, templates.TemplateStateReady) diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index c00a7f5..0fb12cf 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -3,11 +3,12 @@ package manager import ( "time" + "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/util" ) type ManagerConfig struct { - ManagerDatabaseConfig DatabaseConfig + ManagerDatabaseConfig db.DatabaseConfig TemplateDatabaseTemplate string DatabasePrefix string @@ -24,7 +25,7 @@ func DefaultManagerConfigFromEnv() ManagerConfig { return ManagerConfig{ - ManagerDatabaseConfig: DatabaseConfig{ + ManagerDatabaseConfig: db.DatabaseConfig{ Host: util.GetEnv("INTEGRESQL_PGHOST", util.GetEnv("PGHOST", "127.0.0.1")), Port: util.GetEnvAsInt("INTEGRESQL_PGPORT", util.GetEnvAsInt("PGPORT", 5432)), diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index ae90154..8fe5ba9 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/allaboutapps/integresql/pkg/db" "github.com/lib/pq" "github.com/stretchr/testify/assert" ) @@ -31,7 +32,7 @@ func 
TestManagerConnectError(t *testing.T) { t.Parallel() m := New(ManagerConfig{ - ManagerDatabaseConfig: DatabaseConfig{ + ManagerDatabaseConfig: db.DatabaseConfig{ Host: "definitelydoesnotexist", Port: 2345, Username: "definitelydoesnotexist", diff --git a/pkg/manager/template_database.go b/pkg/manager/template_database.go deleted file mode 100644 index de56d0a..0000000 --- a/pkg/manager/template_database.go +++ /dev/null @@ -1,18 +0,0 @@ -package manager - -type TemplateConfig struct { - TemplateHash string `json:"templateHash"` - Config DatabaseConfig `json:"config"` - nextTestID int -} - -func (c TemplateConfig) IsEmpty() bool { - return c.TemplateHash == "" -} - -type TemplateDatabase struct { - Database `json:"database"` - - nextTestID int - testDatabases []*TestDatabase -} diff --git a/pkg/manager/test_database.go b/pkg/manager/test_database.go deleted file mode 100644 index f378cf0..0000000 --- a/pkg/manager/test_database.go +++ /dev/null @@ -1,42 +0,0 @@ -package manager - -import "context" - -type TestDatabase struct { - Database `json:"database"` - - ID int `json:"id"` - - dirty bool -} - -func (t *TestDatabase) Dirty(ctx context.Context) bool { - t.RLock() - defer t.RUnlock() - - return t.dirty -} - -func (t *TestDatabase) FlagAsDirty(ctx context.Context) { - t.Lock() - defer t.Unlock() - - t.dirty = true -} - -func (t *TestDatabase) FlagAsClean(ctx context.Context) { - t.Lock() - defer t.Unlock() - - t.dirty = false -} - -func (t *TestDatabase) ReadyForTest(ctx context.Context) bool { - return t.Ready(ctx) && !t.Dirty(ctx) -} - -type ByID []*TestDatabase - -func (i ByID) Len() int { return len(i) } -func (a ByID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByID) Less(i, j int) bool { return a[i].ID < a[j].ID } diff --git a/tests/testclient/client.go b/tests/testclient/client.go index 0553afd..e416f78 100644 --- a/tests/testclient/client.go +++ b/tests/testclient/client.go @@ -15,6 +15,7 @@ import ( "path" 
"github.com/allaboutapps/integresql/pkg/manager" + "github.com/allaboutapps/integresql/pkg/pool" "github.com/allaboutapps/integresql/pkg/util" _ "github.com/lib/pq" ) @@ -219,7 +220,7 @@ func (c *Client) GetTestDatabase(ctx context.Context, hash string) (TestDatabase case http.StatusNotFound: return test, manager.ErrTemplateNotFound case http.StatusGone: - return test, manager.ErrDatabaseDiscarded + return test, pool.ErrUnknownID case http.StatusServiceUnavailable: return test, manager.ErrManagerNotReady default: From f4038729a93acfeed9a9341514631cf16d3456ff Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 20 Jun 2023 14:11:39 +0200 Subject: [PATCH 024/160] update to go 1.20, update Dockerfile pkgs, use development user --- Dockerfile | 106 +++++++++++++++++++++++++++++++++++---------- docker-compose.yml | 2 +- go.mod | 17 ++++++-- go.sum | 4 -- tools.go | 2 +- 5 files changed, 100 insertions(+), 31 deletions(-) diff --git a/Dockerfile b/Dockerfile index 3684240..1e46759 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,22 +1,33 @@ -FROM golang:1.17.13 AS development +### ----------------------- +# --- Stage: development +# --- Purpose: Local development environment +# --- https://hub.docker.com/_/golang +# --- https://github.com/microsoft/vscode-remote-try-go/blob/master/.devcontainer/Dockerfile +### ----------------------- +FROM golang:1.20.5-bullseye AS development -# https://github.com/go-modules-by-example/index/blob/master/010_tools/README.md#walk-through -ENV GOBIN /app/bin -ENV PATH $GOBIN:$PATH +# Avoid warnings by switching to noninteractive +ENV DEBIAN_FRONTEND=noninteractive + +# Our Makefile / env fully supports parallel job execution +ENV MAKEFLAGS "-j 8 --no-print-directory" # postgresql-support: Add the official postgres repo to install the matching postgresql-client tools of your stack -# see https://wiki.postgresql.org/wiki/Apt +# https://wiki.postgresql.org/wiki/Apt # run lsb_release -c inside the container to pick 
the proper repository flavor -# e.g. stretch=>stretch-pgdg, buster=>buster-pgdg -RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ buster-pgdg main" \ +# e.g. stretch=>stretch-pgdg, buster=>buster-pgdg, bullseye=>bullseye-pgdg +RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ bullseye-pgdg main" \ | tee /etc/apt/sources.list.d/pgdg.list \ && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc \ | apt-key add - + # Install required system dependencies RUN apt-get update \ - && apt-get install -y --no-install-recommends \ + && apt-get install -y \ locales \ + sudo \ + bash-completion \ postgresql-client-12 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* @@ -29,32 +40,83 @@ RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ ENV LANG en_US.UTF-8 -# sql-formatting: Install the same version of pg_formatter as used in your editors, as of 2020-03 thats v4.2 -# https://github.com/darold/pgFormatter/releases +# sql pgFormatter: Integrates with vscode-pgFormatter (we pin pgFormatter.pgFormatterPath for the extension to this version) +# requires perl to be installed # https://github.com/bradymholt/vscode-pgFormatter/commits/master -RUN wget https://github.com/darold/pgFormatter/archive/v4.2.tar.gz \ - && tar xzf v4.2.tar.gz \ - && cd pgFormatter-4.2 \ +# https://github.com/darold/pgFormatter/releases +RUN mkdir -p /tmp/pgFormatter \ + && cd /tmp/pgFormatter \ + && wget https://github.com/darold/pgFormatter/archive/v5.3.tar.gz \ + && tar xzf v5.3.tar.gz \ + && cd pgFormatter-5.3 \ && perl Makefile.PL \ - && make && make install + && make && make install \ + && rm -rf /tmp/pgFormatter -# go richgo: (this package should NOT be installed via go get) -# https://github.com/kyoh86/richgo/releases -RUN wget https://github.com/kyoh86/richgo/releases/download/v0.3.3/richgo_0.3.3_linux_amd64.tar.gz \ - && tar xzf richgo_0.3.3_linux_amd64.tar.gz \ - && cp richgo /usr/local/bin/richgo +# go 
gotestsum: (this package should NOT be installed via go get) +# https://github.com/gotestyourself/gotestsum/releases +RUN mkdir -p /tmp/gotestsum \ + && cd /tmp/gotestsum \ + && ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \ + && wget "https://github.com/gotestyourself/gotestsum/releases/download/v1.9.0/gotestsum_1.9.0_linux_${ARCH}.tar.gz" \ + && tar xzf "gotestsum_1.9.0_linux_${ARCH}.tar.gz" \ + && cp gotestsum /usr/local/bin/gotestsum \ + && rm -rf /tmp/gotestsum # go linting: (this package should NOT be installed via go get) # https://github.com/golangci/golangci-lint#binary +# https://github.com/golangci/golangci-lint/releases RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \ - | sh -s -- -b $(go env GOPATH)/bin v1.24.0 + | sh -s -- -b $(go env GOPATH)/bin v1.52.2 # go swagger: (this package should NOT be installed via go get) # https://github.com/go-swagger/go-swagger/releases -RUN curl -o /usr/local/bin/swagger -L'#' \ - "https://github.com/go-swagger/go-swagger/releases/download/v0.23.0/swagger_linux_amd64" \ +RUN ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \ + && curl -o /usr/local/bin/swagger -L'#' \ + "https://github.com/go-swagger/go-swagger/releases/download/v0.29.0/swagger_linux_${ARCH}" \ && chmod +x /usr/local/bin/swagger +# linux permissions / vscode support: Add user to avoid linux file permission issues +# Detail: Inside the container, any mounted files/folders will have the exact same permissions +# as outside the container - including the owner user ID (UID) and group ID (GID). +# Because of this, your container user will either need to have the same UID or be in a group with the same GID. +# The actual name of the user / group does not matter. The first user on a machine typically gets a UID of 1000, +# so most containers use this as the ID of the user to try to avoid this problem. 
+# 2020-04: docker-compose does not support passing id -u / id -g as part of its config, therefore we assume uid 1000 +# https://code.visualstudio.com/docs/remote/containers-advanced#_adding-a-nonroot-user-to-your-dev-container +# https://code.visualstudio.com/docs/remote/containers-advanced#_creating-a-nonroot-user +ARG USERNAME=development +ARG USER_UID=1000 +ARG USER_GID=$USER_UID + +RUN groupadd --gid $USER_GID $USERNAME \ + && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME \ + && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ + && chmod 0440 /etc/sudoers.d/$USERNAME + + +# vscode support: cached extensions install directory +# https://code.visualstudio.com/docs/remote/containers-advanced#_avoiding-extension-reinstalls-on-container-rebuild +RUN mkdir -p /home/$USERNAME/.vscode-server/extensions \ + /home/$USERNAME/.vscode-server-insiders/extensions \ + && chown -R $USERNAME \ + /home/$USERNAME/.vscode-server \ + /home/$USERNAME/.vscode-server-insiders + +# linux permissions / vscode support: chown $GOPATH so $USERNAME can directly work with it +# Note that this should be the final step after installing all build deps +RUN mkdir -p /$GOPATH/pkg && chown -R $USERNAME /$GOPATH + + +# $GOBIN is where our own compiled binaries will live and other go.mod / VSCode binaries will be installed. +# It should always come AFTER our other $PATH segments and should be earliest targeted in stage "builder", +# as /app/bin will the shadowed by a volume mount via docker-compose! +# E.g. "which golangci-lint" should report "/go/bin" not "/app/bin" (where VSCode will place it). 
+# https://github.com/go-modules-by-example/index/blob/master/010_tools/README.md#walk-through +WORKDIR /app +ENV GOBIN /app/bin +ENV PATH $PATH:$GOBIN + ### ----------------------- # --- Stage: builder ### ----------------------- diff --git a/docker-compose.yml b/docker-compose.yml index 91b5922..841a743 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -31,7 +31,7 @@ services: command: /bin/sh -c "while sleep 1000; do :; done" postgres: - image: postgres:12.2-alpine # should be the same version as used in .drone.yml, Dockerfile and live + image: postgres:12.4-alpine # should be the same version as used in .drone.yml, Dockerfile and live command: "postgres -c 'shared_buffers=128MB' -c 'fsync=off' -c 'synchronous_commit=off' -c 'full_page_writes=off' -c 'max_connections=100' -c 'client_min_messages=warning'" expose: - "5432" diff --git a/go.mod b/go.mod index 1230b4d..1470be5 100644 --- a/go.mod +++ b/go.mod @@ -1,17 +1,28 @@ module github.com/allaboutapps/integresql -go 1.14 +go 1.20 require ( - github.com/davecgh/go-spew v1.1.1 // indirect github.com/google/uuid v1.3.0 - github.com/kr/pretty v0.2.1 // indirect github.com/labstack/echo/v4 v4.1.16 github.com/lib/pq v1.3.0 github.com/stretchr/testify v1.7.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/kr/pretty v0.2.1 // indirect + github.com/labstack/gommon v0.3.0 // indirect + github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.1.0 // indirect golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 // indirect golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect 
golang.org/x/text v0.3.7 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) diff --git a/go.sum b/go.sum index 3830e28..4fd1ca1 100644 --- a/go.sum +++ b/go.sum @@ -48,14 +48,10 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/tools.go b/tools.go index 37c9136..6517aa3 100644 --- a/tools.go +++ b/tools.go @@ -1,4 +1,4 @@ -// +build tools +//go:build tools // Tooling dependencies // https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module From 
cd34798b5a054475be8ee3e2a09b9e1707851a16 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 20 Jun 2023 12:36:43 +0000 Subject: [PATCH 025/160] update devcontainer settings --- .devcontainer/devcontainer.json | 175 +++++++++++++++++--------------- 1 file changed, 94 insertions(+), 81 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 30cfe01..a8f6bd0 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -15,93 +15,106 @@ // connected. This is typically a file mount in .devcontainer/docker-compose.yml "workspaceFolder": "/app", // Set *default* container specific settings.json values on container create. - "settings": { - "terminal.integrated.shell.linux": null, - // https://github.com/golang/tools/blob/master/gopls/doc/vscode.md#vscode - "go.useLanguageServer": true, - "[go]": { - "editor.formatOnSave": true, - "editor.codeActionsOnSave": { - "source.organizeImports": true, + "customizations": { + "vscode": { + "settings": { + // https://github.com/golang/tools/blob/master/gopls/doc/vscode.md#vscode + "go.useLanguageServer": true, + "[go]": { + "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.organizeImports": true, + }, + // Optional: Disable snippets, as they conflict with completion ranking. + "editor.snippetSuggestions": "none", + }, + "[go.mod]": { + "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.organizeImports": true, + }, + }, + "[sql]": { + "editor.formatOnSave": true + }, + "gopls": { + // Add parameter placeholders when completing a function. + "usePlaceholders": true, + // If true, enable additional analyses with staticcheck. + // Warning: This will significantly increase memory usage. 
+ // DISABLED, done via + "staticcheck": false, + }, + // https://github.com/golangci/golangci-lint#editor-integration + "go.lintTool": "golangci-lint", + "go.lintFlags": [ + "--fast", + "--timeout", + "5m" + ], + // disable test caching, race and show coverage (in sync with makefile) + "go.testFlags": [ + "-cover", + "-race", + "-count=1", + "-v" + ], + "go.coverMode": "atomic", // atomic is required when utilizing -race + "go.delveConfig": { + "dlvLoadConfig": { + // increase max length of strings displayed in debugger + "maxStringLen": 2048, + }, + "apiVersion": 2, + }, + // ensure that the pgFormatter VSCode extension uses the pgFormatter that comes preinstalled in the Dockerfile + "pgFormatter.pgFormatterPath": "/usr/local/bin/pg_format" + // "go.lintOnSave": "workspace" + // general build settings in sync with our makefile + // "go.buildFlags": [ + // "-o", + // "bin/app" + // ] + // "sqltools.connections": [ + // { + // "database": "sample", + // "dialect": "PostgreSQL", + // "name": "postgres", + // "password": "9bed16f749d74a3c8bfbced18a7647f5", + // "port": 5432, + // "server": "postgres", + // "username": "dbuser" + // } + // ], + // "sqltools.autoConnectTo": [ + // "postgres" + // ], + // // only use pg_format to actually format! + // "sqltools.formatLanguages": [], + // "sqltools.telemetry": false, + // "sqltools.autoOpenSessionFiles": false }, - // Optional: Disable snippets, as they conflict with completion ranking. - "editor.snippetSuggestions": "none", - }, - "[go.mod]": { - "editor.formatOnSave": true, - "editor.codeActionsOnSave": { - "source.organizeImports": true, - }, - }, - "[sql]": { - "editor.formatOnSave": true - }, - "gopls": { - // Add parameter placeholders when completing a function. - "usePlaceholders": true, - // If true, enable additional analyses with staticcheck. - // Warning: This will significantly increase memory usage. 
- // DISABLED, done via - "staticcheck": false, - }, - // https://code.visualstudio.com/docs/languages/go#_intellisense - "go.autocompleteUnimportedPackages": true, - // https://github.com/golangci/golangci-lint#editor-integration - "go.lintTool": "golangci-lint", - "go.lintFlags": [ - "--fast" - ], - // disable test caching, race and show coverage (in sync with makefile) - "go.testFlags": [ - "-cover", - "-race", - "-count=1", - "-v" - ], - // "go.lintOnSave": "workspace" - // general build settings in sync with our makefile - // "go.buildFlags": [ - // "-o", - // "bin/app" - // ] - // "sqltools.connections": [ - // { - // "database": "sample", - // "dialect": "PostgreSQL", - // "name": "postgres", - // "password": "9bed16f749d74a3c8bfbced18a7647f5", - // "port": 5432, - // "server": "postgres", - // "username": "dbuser" - // } - // ], - // "sqltools.autoConnectTo": [ - // "postgres" - // ], - // // only use pg_format to actually format! - // "sqltools.formatLanguages": [], - // "sqltools.telemetry": false, - // "sqltools.autoOpenSessionFiles": false + // Add the IDs of extensions you want installed when the container is created. + "extensions": [ + // required: + "golang.go", + "bradymholt.pgformatter", + // optional: + // "766b.go-outliner", + "heaths.vscode-guid", + "bungcip.better-toml", + "eamodio.gitlens", + "casualjim.gotemplate" + // "mtxr.sqltools", + ] + } }, - // Add the IDs of extensions you want installed when the container is created. - "extensions": [ - // required: - "ms-vscode.go", - "bradymholt.pgformatter", - // optional: - // "766b.go-outliner", - "heaths.vscode-guid", - "bungcip.better-toml", - "eamodio.gitlens", - "casualjim.gotemplate" - // "mtxr.sqltools", - ] // Uncomment the next line if you want start specific services in your Docker Compose config. // "runServices": [], // Uncomment the next line if you want to keep your containers running after VS Code shuts down. 
// "shutdownAction": "none", // Uncomment the next line to run commands after the container is created - for example installing git. - // "postCreateCommand": "apt-get update && apt-get install -y git", + "postCreateCommand": "go version", // Uncomment to connect as a non-root user. See https://aka.ms/vscode-remote/containers/non-root. - // "remoteUser": "vscode" + "remoteUser": "development" } \ No newline at end of file From 937b493617974b91f2d334e985b19781a9eb8aae Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 20 Jun 2023 12:54:07 +0000 Subject: [PATCH 026/160] fix unlocking DBPool --- pkg/pool/pool.go | 111 ++++++++++++++++++++++++----------------------- 1 file changed, 56 insertions(+), 55 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index de7cd30..3a297c6 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -61,31 +61,29 @@ func popFirstKey(idMap dbIDMap) int { } func (p *DBPool) GetDB(ctx context.Context, hash string) (db db.TestDatabase, isDirty bool, err error) { - var pool *dbHashPool - { - // ! - // DBPool locked - p.mutex.Lock() - defer p.mutex.Unlock() - - pool = p.pools[hash] - - if pool == nil { - // no such pool - err = ErrUnknownHash - return - } + // ! + // DBPool locked + p.mutex.Lock() - // ! - // dbHashPool locked - pool.Lock() + pool := p.pools[hash] - // DBPool unlocked - // ! + if pool == nil { + // no such pool + p.mutex.Unlock() + err = ErrUnknownHash + return } + + // ! + // dbHashPool locked before unlocking DBPool + pool.Lock() defer pool.Unlock() + p.mutex.Unlock() + // DBPool unlocked + // ! + var index int if len(pool.ready) > 0 { // if there are some ready to be used DB, just get one @@ -120,22 +118,16 @@ func (p *DBPool) GetDB(ctx context.Context, hash string) (db db.TestDatabase, is } func (p *DBPool) AddTestDatabase(ctx context.Context, template db.Database, dbNamePrefix string, initFunc func(db.TestDatabase) error) (db.TestDatabase, error) { - var pool *dbHashPool hash := template.TemplateHash - { - // ! 
- // DBPool locked - p.mutex.Lock() - defer p.mutex.Unlock() + // ! + // DBPool locked + p.mutex.Lock() - pool = p.pools[hash] - if pool == nil { - pool = newDBHashPool(p.maxPoolSize) - p.pools[hash] = pool - } - // DBPool unlocked - // ! + pool := p.pools[hash] + if pool == nil { + pool = newDBHashPool(p.maxPoolSize) + p.pools[hash] = pool } // ! @@ -143,6 +135,10 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, template db.Database, dbNa pool.Lock() defer pool.Unlock() + p.mutex.Unlock() + // DBPool unlocked + // ! + // get index of a next test DB - its ID index := len(pool.dbs) if index >= p.maxPoolSize { @@ -177,36 +173,35 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, template db.Database, dbNa } func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { - var pool *dbHashPool - - { - // ! - // DBPool locked - p.mutex.Lock() - defer p.mutex.Unlock() - - // needs to be checked inside locked region - // because we access maxPoolSize - if id < 0 || id >= p.maxPoolSize { - return ErrInvalidIndex - } - pool = p.pools[hash] + // ! + // DBPool locked + p.mutex.Lock() - if pool == nil { - // no such pool - return ErrUnknownHash - } + // needs to be checked inside locked region + // because we access maxPoolSize + if id < 0 || id >= p.maxPoolSize { + p.mutex.Unlock() + return ErrInvalidIndex + } - // ! - // dbHashPool locked - pool.Lock() + pool := p.pools[hash] - // DBPool unlocked - // ! + if pool == nil { + // no such pool + p.mutex.Unlock() + return ErrUnknownHash } + + // ! + // dbHashPool locked + pool.Lock() defer pool.Unlock() + p.mutex.Unlock() + // DBPool unlocked + // ! + // check if pool has been already returned if pool.dirty != nil && len(pool.dirty) > 0 { exists := pool.dirty[id] @@ -219,6 +214,8 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er pool.dirty[id] = true return nil + // dbHashPool unlocked + // ! 
} func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc func(db.TestDatabase) error) error { @@ -241,6 +238,8 @@ func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc } func (p *DBPool) removeAllFromPool(pool *dbHashPool, removeFunc func(db.TestDatabase) error) error { + // ! + // dbHashPool locked pool.Lock() defer pool.Unlock() @@ -258,6 +257,8 @@ func (p *DBPool) removeAllFromPool(pool *dbHashPool, removeFunc func(db.TestData } return nil + // dbHashPool unlocked + // ! } func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) error) error { From 2c3da0db79db479dc25d8af6af37cf968a053095 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 06:40:36 +0000 Subject: [PATCH 027/160] add WaitWithTimeout to util --- pkg/util/wait.go | 33 ++++++++++++++++++++++++++ pkg/util/wait_test.go | 54 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 pkg/util/wait.go create mode 100644 pkg/util/wait_test.go diff --git a/pkg/util/wait.go b/pkg/util/wait.go new file mode 100644 index 0000000..1255e91 --- /dev/null +++ b/pkg/util/wait.go @@ -0,0 +1,33 @@ +package util + +import ( + "context" + "errors" + "time" + + "golang.org/x/sync/errgroup" +) + +var ErrTimeout = errors.New("timeout while waiting for operation to complete") + +func WaitWithTimeout[T any](ctx context.Context, timeout time.Duration, operation func(context.Context) (T, error)) (T, error) { + cctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + resChan := make(chan T, 1) + g, cctx := errgroup.WithContext(cctx) + + g.Go(func() error { + res, err := operation(cctx) + resChan <- res + return err + }) + + select { + case res := <-resChan: + return res, g.Wait() + case <-time.After(timeout): + var empty T + return empty, ErrTimeout + } +} diff --git a/pkg/util/wait_test.go b/pkg/util/wait_test.go new file mode 100644 index 0000000..903645a --- /dev/null +++ 
b/pkg/util/wait_test.go @@ -0,0 +1,54 @@ +package util_test + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/allaboutapps/integresql/pkg/util" + "github.com/stretchr/testify/assert" +) + +func TestWaitWithTimeout(t *testing.T) { + ctx := context.Background() + type output struct { + A int + } + + // operation timeout + start := time.Now() + res, err := util.WaitWithTimeout(ctx, time.Millisecond*100, func(ctx context.Context) (output, error) { + time.Sleep(time.Millisecond * 200) + return output{A: 1}, nil + }) + elapsed := time.Since(start) + + assert.ErrorIs(t, err, util.ErrTimeout) + assert.Empty(t, res) + assert.Less(t, elapsed, 150*time.Millisecond) + + // operation completed + start = time.Now() + res, err = util.WaitWithTimeout(ctx, time.Millisecond*200, func(ctx context.Context) (output, error) { + time.Sleep(time.Millisecond * 160) + return output{A: 1}, nil + }) + elapsed = time.Since(start) + + assert.NoError(t, err) + assert.Equal(t, 1, res.A) + assert.Less(t, elapsed, 180*time.Millisecond) + + // operation completed with error + testErr := errors.New("test error") + start = time.Now() + res, err = util.WaitWithTimeout(ctx, time.Millisecond*100, func(ctx context.Context) (output, error) { + return output{}, testErr + }) + elapsed = time.Since(start) + + assert.ErrorIs(t, err, testErr) + assert.Empty(t, res) + assert.Less(t, elapsed, 120*time.Millisecond) +} From 76f3fcebf5c4759fbe71365643cfcedbf162ebd1 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 06:46:40 +0000 Subject: [PATCH 028/160] use WaitWithTimeout in template --- go.mod | 1 + go.sum | 2 ++ pkg/templates/template.go | 18 ++++++------------ 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 1470be5..0705e38 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/labstack/echo/v4 v4.1.16 github.com/lib/pq v1.3.0 github.com/stretchr/testify v1.7.0 + golang.org/x/sync v0.3.0 ) require ( 
diff --git a/go.sum b/go.sum index 4fd1ca1..bdf3375 100644 --- a/go.sum +++ b/go.sum @@ -42,6 +42,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/pkg/templates/template.go b/pkg/templates/template.go index 45e42da..04b7931 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -6,6 +6,7 @@ import ( "time" "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/util" ) type TemplateState int32 @@ -59,23 +60,16 @@ func (t *Template) WaitUntilReady(ctx context.Context, timeout time.Duration) (e return } - cctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - newStateChan := make(chan TemplateState, 1) - go func() { + state, err := util.WaitWithTimeout(ctx, timeout, func(context.Context) (TemplateState, error) { t.cond.L.Lock() defer t.cond.L.Unlock() t.cond.Wait() - newStateChan <- t.state - }() + return t.state, nil + }) - select { - case state := <-newStateChan: - return state - case <-cctx.Done(): - // timeout means that there were no state changes in the meantime + if err != nil { return currentState } + return state } From 971dee68f7db7ff82a985ea058faccaf8651a6bb 
Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 06:51:13 +0000 Subject: [PATCH 029/160] don't cleanup dirty db by manager --- pkg/manager/manager.go | 22 ++++++++-------------- pkg/pool/pool.go | 5 +++-- 2 files changed, 11 insertions(+), 16 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 9694c10..6b3426b 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -231,7 +231,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db // early bailout if we are already ready (multiple calls) if state == templates.TemplateStateReady { - return template.Database, nil + return template.Database, ErrTemplateAlreadyInitialized } // Disallow transition from discarded to ready @@ -266,24 +266,18 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return db.TestDatabase{}, ErrInvalidTemplateState } - testDB, dirty, err := m.pool.GetDB(ctx, template.TemplateHash) + testDB, err := m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseWaitTimeout) if err != nil { - if !errors.Is(err, pool.ErrNoDBReady) { - // internal error occurred, return directly + if errors.Is(err, pool.ErrNoDBReady) { + // no DB is ready, we can try to add a new DB is pool is not full + return m.createTestDatabaseFromTemplate(ctx, template) + } else { + // else internal error occurred, return directly return db.TestDatabase{}, err } - - // no DB is ready, we can try to add a new DB is pool is not full - return m.createTestDatabaseFromTemplate(ctx, template) - } - - // if no error occurred, a testDB has been found - if !dirty { - return testDB, nil } - // clean it, if it's dirty, before returning it to the user - return m.cleanTestDatabase(ctx, testDB, m.makeTemplateDatabaseName(testDB.TemplateHash)) + return testDB, nil } func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 
3a297c6..cfc08f3 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "sync" + "time" "github.com/allaboutapps/integresql/pkg/db" ) @@ -60,7 +61,7 @@ func popFirstKey(idMap dbIDMap) int { return id } -func (p *DBPool) GetDB(ctx context.Context, hash string) (db db.TestDatabase, isDirty bool, err error) { +func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { // ! // DBPool locked @@ -95,8 +96,8 @@ func (p *DBPool) GetDB(ctx context.Context, hash string) (db db.TestDatabase, is return } - isDirty = true index = popFirstKey(pool.dirty) + } // sanity check, should never happen From f89bafd58dd1aa67452824bf5696e82dce23d6b4 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 09:44:12 +0000 Subject: [PATCH 030/160] recreate db in pool in background using channels --- internal/api/templates/templates.go | 2 +- pkg/manager/manager.go | 32 ++-- pkg/manager/manager_config.go | 2 +- pkg/pool/pool.go | 258 ++++++++++++++++++---------- tests/testclient/client.go | 2 +- 5 files changed, 186 insertions(+), 110 deletions(-) diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 1412c0b..3f81548 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -133,7 +133,7 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return echo.ErrServiceUnavailable case manager.ErrTemplateNotFound: return echo.NewHTTPError(http.StatusNotFound, "template not found") - case pool.ErrUnknownID: + case pool.ErrUnknownHash: return echo.NewHTTPError(http.StatusNotFound, "test database not found") default: return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 6b3426b..8375691 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -267,14 +267,13 @@ func (m *Manager) GetTestDatabase(ctx 
context.Context, hash string) (db.TestData } testDB, err := m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseWaitTimeout) + if errors.Is(err, pool.ErrTimeout) { + // on timeout we can try to extend the pool + testDB, err = m.pool.ExtendPool(ctx, template.Database) + } + if err != nil { - if errors.Is(err, pool.ErrNoDBReady) { - // no DB is ready, we can try to add a new DB is pool is not full - return m.createTestDatabaseFromTemplate(ctx, template) - } else { - // else internal error occurred, return directly - return db.TestDatabase{}, err - } + return db.TestDatabase{}, err } return testDB, nil @@ -384,22 +383,17 @@ func (m *Manager) cleanTestDatabase(ctx context.Context, testDB db.TestDatabase, // createTestDatabaseFromTemplate adds a new test database in the pool (increasing its size) basing on the given template. // It waits until the template is ready. -func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template *templates.Template) (db.TestDatabase, error) { +func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template *templates.Template) error { if template.WaitUntilReady(ctx, m.config.TestDatabaseWaitTimeout) != templates.TemplateStateReady { // if the state changed in the meantime, return - return db.TestDatabase{}, ErrInvalidTemplateState + return ErrInvalidTemplateState } - dbNamePrefix := m.makeTestDatabasePrefix(template.TemplateHash) - testDB, err := m.pool.AddTestDatabase(ctx, template.Database, dbNamePrefix, func(testDB db.TestDatabase) error { + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, template.Config.Database) - }) - - if err != nil { - return db.TestDatabase{}, err } - return testDB, nil + return m.pool.AddTestDatabase(ctx, template.Database, initFunc) } // Adds new test databases for a template, intended to be run asynchronously from other 
operations in a separate goroutine, using the manager's WaitGroup to synchronize for shutdown. @@ -413,8 +407,10 @@ func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Templa defer cancel() for i := 0; i < count; i++ { - // TODO log error somewhere instead of silently swallowing it? - _, _ = m.createTestDatabaseFromTemplate(ctx, template) + if err := m.createTestDatabaseFromTemplate(ctx, template); err != nil { + // TODO anna: error handling + fmt.Printf("integresql: failed to initialize DB: %v\n", err) + } } }() diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 0fb12cf..5eb7ca5 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -55,6 +55,6 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), - TestDatabaseWaitTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB READY_WAIT_TIMEOUT", 1000)), + TestDatabaseWaitTimeout: time.Second * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_WAIT_TIMEOUT_SEC", 10)), } } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index cfc08f3..d600690 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -11,11 +11,19 @@ import ( ) var ( - ErrUnknownHash = errors.New("no db.Database exists for this hash") + ErrUnknownHash = errors.New("no database pool exists for this hash") ErrPoolFull = errors.New("database pool is full") - ErrUnknownID = errors.New("database is not in the pool") - ErrNoDBReady = errors.New("no db.Database is currently ready, perhaps you need to create one") - ErrInvalidIndex = errors.New("invalid db.Database index (ID)") + ErrInvalidState = errors.New("database state is not valid for this operation") + 
ErrInvalidIndex = errors.New("invalid db.Database index (id)") + ErrTimeout = errors.New("timeout on waiting for ready db") +) + +type dbState int + +const ( + dbStateReady = iota + dbStateInUse = iota + dbStateDirty = iota ) type DBPool struct { @@ -23,10 +31,9 @@ type DBPool struct { mutex sync.RWMutex maxPoolSize int + wg sync.WaitGroup } -type dbIDMap map[int]bool // map[db ID] - func NewDBPool(maxPoolSize int) *DBPool { return &DBPool{ pools: make(map[string]*dbHashPool), @@ -35,30 +42,64 @@ func NewDBPool(maxPoolSize int) *DBPool { } } +type RecreateDBFunc func(ctx context.Context, testDB db.TestDatabase, templateName string) error + +type existingDB struct { + state dbState + db.TestDatabase +} + type dbHashPool struct { - dbs []db.TestDatabase - ready dbIDMap // initalized DBs according to a template, ready to pick them up - dirty dbIDMap // returned DBs, need to be initalized again to reuse them + dbs []existingDB + ready chan int // ID; initalized DBs according to a template, ready to pick them up + dirty chan int // ID; returned DBs, need to be initalized again to reuse them + recreateDB RecreateDBFunc + templateDB db.Database sync.RWMutex } -func newDBHashPool(maxPoolSize int) *dbHashPool { +func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Database) *dbHashPool { return &dbHashPool{ - dbs: make([]db.TestDatabase, 0, maxPoolSize), - ready: make(dbIDMap), - dirty: make(dbIDMap), + dbs: make([]existingDB, 0, maxPoolSize), + ready: make(chan int, maxPoolSize), + dirty: make(chan int, maxPoolSize), + recreateDB: recreateDB, + templateDB: templateDB, } } -func popFirstKey(idMap dbIDMap) int { - id := -1 - for key := range idMap { - id = key - break +func (h *dbHashPool) cleanUpDirtyDBWorker() { + ctx := context.Background() + templateName := h.templateDB.Config.Database + + for dirtyID := range h.dirty { + h.RLock() + if dirtyID >= len(h.dbs) { + // sanity check, should never happen + h.RUnlock() + continue + } + testDB := 
h.dbs[dirtyID] + h.RUnlock() + + if testDB.state != dbStateDirty { + continue + } + + if err := h.recreateDB(ctx, testDB.TestDatabase, templateName); err != nil { + // TODO anna: error handling + fmt.Printf("integresql: failed to clean up DB: %v\n", err) + continue + } + + h.Lock() + testDB.state = dbStateReady + h.dbs[dirtyID] = testDB + h.Unlock() + + h.ready <- testDB.ID } - delete(idMap, id) - return id } func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { @@ -66,7 +107,6 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. // ! // DBPool locked p.mutex.Lock() - pool := p.pools[hash] if pool == nil { @@ -76,116 +116,136 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. return } - // ! - // dbHashPool locked before unlocking DBPool - pool.Lock() - defer pool.Unlock() - p.mutex.Unlock() // DBPool unlocked // ! var index int - if len(pool.ready) > 0 { - // if there are some ready to be used DB, just get one - index = popFirstKey(pool.ready) - } else { - // if no DBs are ready, reuse the dirty ones - if len(pool.dirty) == 0 { - err = ErrNoDBReady - return - } - - index = popFirstKey(pool.dirty) - - } - - // sanity check, should never happen - if index < 0 || index >= p.maxPoolSize { - err = ErrInvalidIndex + select { + case <-time.After(timeout): + err = ErrTimeout return + case index = <-pool.ready: } - // pick a ready test db.Database from the index - if len(pool.dbs) <= index { + // ! + // dbHashPool locked + pool.Lock() + defer pool.Unlock() + + // sanity check, should never happen + if index < 0 || index >= len(pool.dbs) { err = ErrInvalidIndex return } - return pool.dbs[index], isDirty, nil + return pool.dbs[index].TestDatabase, nil // dbHashPool unlocked // ! 
} -func (p *DBPool) AddTestDatabase(ctx context.Context, template db.Database, dbNamePrefix string, initFunc func(db.TestDatabase) error) (db.TestDatabase, error) { - hash := template.TemplateHash - - // ! - // DBPool locked - p.mutex.Lock() - - pool := p.pools[hash] - if pool == nil { - pool = newDBHashPool(p.maxPoolSize) - p.pools[hash] = pool - } - +func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { // ! // dbHashPool locked pool.Lock() defer pool.Unlock() - p.mutex.Unlock() - // DBPool unlocked - // ! - // get index of a next test DB - its ID index := len(pool.dbs) - if index >= p.maxPoolSize { + if index == cap(pool.dbs) { return db.TestDatabase{}, ErrPoolFull } // initalization of a new DB newTestDB := db.TestDatabase{ Database: db.Database{ - TemplateHash: template.TemplateHash, - Config: template.Config, + TemplateHash: pool.templateDB.TemplateHash, + Config: pool.templateDB.Config, }, ID: index, } // db name has an ID in suffix - dbName := fmt.Sprintf("%s%03d", dbNamePrefix, index) + templateName := pool.templateDB.Config.Database + dbName := fmt.Sprintf("%s_%03d", templateName, index) newTestDB.Database.Config.Database = dbName - if err := initFunc(newTestDB); err != nil { + if err := pool.recreateDB(ctx, newTestDB, templateName); err != nil { return db.TestDatabase{}, err } // add new test DB to the pool - pool.dbs = append(pool.dbs, newTestDB) - - // and add its index to 'ready' - pool.ready[index] = true + pool.dbs = append(pool.dbs, existingDB{state: dbStateReady, TestDatabase: newTestDB}) return newTestDB, nil // dbHashPool unlocked // ! } -func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { +func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { + hash := templateDB.TemplateHash // ! 
// DBPool locked p.mutex.Lock() + pool := p.pools[hash] + + if pool == nil { + // create a new dbHashPool + pool = newDBHashPool(p.maxPoolSize, pool.recreateDB, templateDB) + // and start the cleaning worker + p.enableCleanUpDirtyDBWorker(pool) - // needs to be checked inside locked region - // because we access maxPoolSize - if id < 0 || id >= p.maxPoolSize { + // pool is ready + p.pools[hash] = pool + } + + p.mutex.Unlock() + // DBPool unlocked + // ! + + newTestDB, err := pool.extend(ctx) + if err != nil { + return err + } + + // and add its index to 'ready' + pool.ready <- newTestDB.ID + + return nil +} + +func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { + hash := templateDB.TemplateHash + + // ! + // DBPool locked + p.mutex.Lock() + pool := p.pools[hash] + + if pool == nil { + // meant to be only for already initialized pools p.mutex.Unlock() - return ErrInvalidIndex + return db.TestDatabase{}, ErrUnknownHash } + p.mutex.Unlock() + // DBPool unlocked + // ! + + newTestDB, err := pool.extend(ctx) + if err != nil { + return db.TestDatabase{}, err + } + + return newTestDB, nil +} + +func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { + + // ! + // DBPool locked + p.mutex.Lock() pool := p.pools[hash] if pool == nil { @@ -203,16 +263,21 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // DBPool unlocked // ! 
- // check if pool has been already returned - if pool.dirty != nil && len(pool.dirty) > 0 { - exists := pool.dirty[id] - if exists { - return ErrUnknownID - } + if id >= len(pool.dbs) { + return ErrInvalidIndex + } + + // check if db is in the correct state + testDB := pool.dbs[id] + if testDB.state != dbStateInUse { + return ErrInvalidState } - // ok, it hasn't been returned yet - pool.dirty[id] = true + testDB.state = dbStateDirty + pool.dbs[id] = testDB + + // add it to dirty channel, to have it cleaned up by the worker + pool.dirty <- id return nil // dbHashPool unlocked @@ -238,7 +303,19 @@ func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc // ! } +func (p *DBPool) enableCleanUpDirtyDBWorker(pool *dbHashPool) { + p.wg.Add(1) + go func() { + defer p.wg.Done() + pool.cleanUpDirtyDBWorker() + }() +} + func (p *DBPool) removeAllFromPool(pool *dbHashPool, removeFunc func(db.TestDatabase) error) error { + // close the channels, and reopen them when the operation is completed + close(pool.dirty) + close(pool.ready) + // ! // dbHashPool locked pool.Lock() @@ -246,17 +323,20 @@ func (p *DBPool) removeAllFromPool(pool *dbHashPool, removeFunc func(db.TestData // remove from back to be able to repeat operation in case of error for id := len(pool.dbs) - 1; id >= 0; id-- { - db := pool.dbs[id] + testDB := pool.dbs[id].TestDatabase - if err := removeFunc(db); err != nil { + if err := removeFunc(testDB); err != nil { return err } pool.dbs = pool.dbs[:len(pool.dbs)-1] - delete(pool.dirty, id) - delete(pool.ready, id) } + // all DBs removed, enable the worker again + pool.dirty = make(chan int, p.maxPoolSize) + pool.ready = make(chan int, p.maxPoolSize) + p.enableCleanUpDirtyDBWorker(pool) + return nil // dbHashPool unlocked // ! 
diff --git a/tests/testclient/client.go b/tests/testclient/client.go index e416f78..dd80338 100644 --- a/tests/testclient/client.go +++ b/tests/testclient/client.go @@ -220,7 +220,7 @@ func (c *Client) GetTestDatabase(ctx context.Context, hash string) (TestDatabase case http.StatusNotFound: return test, manager.ErrTemplateNotFound case http.StatusGone: - return test, pool.ErrUnknownID + return test, pool.ErrInvalidIndex case http.StatusServiceUnavailable: return test, manager.ErrManagerNotReady default: From c7cd87f06b7ab54831d92bdb05bac3fde62c0229 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 10:39:26 +0000 Subject: [PATCH 031/160] add pool tests --- pkg/pool/pool.go | 38 +++++++-- pkg/pool/pool_test.go | 182 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 212 insertions(+), 8 deletions(-) create mode 100644 pkg/pool/pool_test.go diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index d600690..a88f33c 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -69,7 +69,7 @@ func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Dat } } -func (h *dbHashPool) cleanUpDirtyDBWorker() { +func (h *dbHashPool) workerCleanUpDirtyDB() { ctx := context.Background() templateName := h.templateDB.Config.Database @@ -102,6 +102,19 @@ func (h *dbHashPool) cleanUpDirtyDBWorker() { } } +func (p *DBPool) Stop() { + p.mutex.Lock() + defer p.mutex.Unlock() + + for _, pool := range p.pools { + close(pool.dirty) + } + p.wg.Wait() + for _, pool := range p.pools { + close(pool.ready) + } +} + func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { // ! @@ -139,10 +152,19 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. 
return } - return pool.dbs[index].TestDatabase, nil + givenTestDB := pool.dbs[index] + // sanity check, should never happen - we got this index from 'ready' channel + if givenTestDB.state != dbStateReady { + err = ErrInvalidState + return + } + + givenTestDB.state = dbStateInUse + pool.dbs[index] = givenTestDB + + return givenTestDB.TestDatabase, nil // dbHashPool unlocked // ! - } func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { @@ -192,9 +214,9 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in if pool == nil { // create a new dbHashPool - pool = newDBHashPool(p.maxPoolSize, pool.recreateDB, templateDB) + pool = newDBHashPool(p.maxPoolSize, initFunc, templateDB) // and start the cleaning worker - p.enableCleanUpDirtyDBWorker(pool) + p.enableworkerCleanUpDirtyDB(pool) // pool is ready p.pools[hash] = pool @@ -303,11 +325,11 @@ func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc // ! } -func (p *DBPool) enableCleanUpDirtyDBWorker(pool *dbHashPool) { +func (p *DBPool) enableworkerCleanUpDirtyDB(pool *dbHashPool) { p.wg.Add(1) go func() { defer p.wg.Done() - pool.cleanUpDirtyDBWorker() + pool.workerCleanUpDirtyDB() }() } @@ -335,7 +357,7 @@ func (p *DBPool) removeAllFromPool(pool *dbHashPool, removeFunc func(db.TestData // all DBs removed, enable the worker again pool.dirty = make(chan int, p.maxPoolSize) pool.ready = make(chan int, p.maxPoolSize) - p.enableCleanUpDirtyDBWorker(pool) + p.enableworkerCleanUpDirtyDB(pool) return nil // dbHashPool unlocked diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go new file mode 100644 index 0000000..05c8bb2 --- /dev/null +++ b/pkg/pool/pool_test.go @@ -0,0 +1,182 @@ +package pool_test + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/pool" + "github.com/stretchr/testify/assert" +) + +func TestPoolAddGet(t *testing.T) { + 
t.Parallel() + + ctx := context.Background() + p := pool.NewDBPool(2) + + hash1 := "h1" + hash2 := "h2" + templateDB := db.Database{ + TemplateHash: hash1, + Config: db.DatabaseConfig{ + Username: "ich", + Database: "template_name", + }, + } + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database) + return nil + } + + // get from empty + _, err := p.GetTestDatabase(ctx, hash1, 0) + assert.Error(t, err, pool.ErrTimeout) + + // add a new one + assert.NoError(t, p.AddTestDatabase(ctx, templateDB, initFunc)) + // get it + testDB, err := p.GetTestDatabase(ctx, hash1, 0) + assert.NoError(t, err) + assert.Equal(t, "template_name_000", testDB.Database.Config.Database) + assert.Equal(t, "ich", testDB.Database.Config.Username) + + // add for h2 + templateDB.TemplateHash = hash2 + assert.NoError(t, p.AddTestDatabase(ctx, templateDB, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB, initFunc)) + assert.ErrorIs(t, p.AddTestDatabase(ctx, templateDB, initFunc), pool.ErrPoolFull) + + // get from empty h1 + _, err = p.GetTestDatabase(ctx, hash1, 0) + assert.Error(t, err, pool.ErrTimeout) + + // get from h2 + testDB1, err := p.GetTestDatabase(ctx, hash2, 0) + assert.NoError(t, err) + assert.Equal(t, hash2, testDB1.TemplateHash) + testDB2, err := p.GetTestDatabase(ctx, hash2, 0) + assert.NoError(t, err) + assert.Equal(t, hash2, testDB2.TemplateHash) + assert.NotEqual(t, testDB1.ID, testDB2.ID) + + p.Stop() +} + +func TestPoolAddGetConcurrent(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + hash2 := "h2" + templateDB1 := db.Database{ + TemplateHash: hash1, + } + templateDB2 := db.Database{ + TemplateHash: hash2, + } + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database) + return nil + } + + maxPoolSize := 6 + p := pool.NewDBPool(maxPoolSize) + + var wg sync.WaitGroup + sleepDuration := 
100 * time.Millisecond + + // add test databases first to initialize hash pool + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) + + // add DB in one goroutine + wg.Add(1) + go func() { + defer wg.Done() + + templateDB1 := templateDB1 + templateDB2 := templateDB2 + sleepDuration := sleepDuration + + // add DBs sequentially + for i := 0; i < maxPoolSize-1; i++ { + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) + time.Sleep(sleepDuration) + } + }() + + // try to get them from another goroutines in parallel + getDB := func(hash string) { + defer wg.Done() + + sleepDuration := sleepDuration + + db, err := p.GetTestDatabase(ctx, hash, time.Duration(maxPoolSize)*sleepDuration) + assert.NoError(t, err) + assert.Equal(t, hash, db.TemplateHash) + t.Logf("got %s %v\n", db.TemplateHash, db.ID) + } + + for i := 0; i < maxPoolSize; i++ { + wg.Add(2) + go getDB(hash1) + go getDB(hash2) + } + + wg.Wait() + p.Stop() +} + +func TestPoolAddGetReturnConcurrent(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + hash2 := "h2" + templateDB1 := db.Database{ + TemplateHash: hash1, + } + templateDB2 := db.Database{ + TemplateHash: hash2, + } + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database) + return nil + } + + maxPoolSize := 6 + p := pool.NewDBPool(maxPoolSize) + + var wg sync.WaitGroup + + // add DBs sequentially + for i := 0; i < maxPoolSize/2; i++ { + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) + } + + // try to get them from another goroutines in parallel + getAndReturnDB := func(hash string) { + defer wg.Done() + + db, err := p.GetTestDatabase(ctx, hash, 3*time.Second) + assert.NoError(t, err) + assert.Equal(t, hash, 
db.TemplateHash) + time.Sleep(20 * time.Millisecond) + t.Logf("returning %s %v\n", db.TemplateHash, db.ID) + assert.NoError(t, p.ReturnTestDatabase(ctx, hash, db.ID)) + } + + for i := 0; i < maxPoolSize*3; i++ { + wg.Add(2) + go getAndReturnDB(hash1) + go getAndReturnDB(hash2) + } + + wg.Wait() + p.Stop() +} From 788004fe09c781c8c55ad195b9d72b89867b909f Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 12:23:14 +0000 Subject: [PATCH 032/160] fix RemoveAll --- pkg/pool/pool.go | 67 ++++++++++++++++++++++--------------------- pkg/pool/pool_test.go | 48 +++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 33 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index a88f33c..84f8545 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -204,6 +204,31 @@ func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { // ! } +func (pool *dbHashPool) removeAllFromPool(removeFunc func(db.TestDatabase) error) error { + // close the dirty channel to stop the worker + close(pool.dirty) + + // ! + // dbHashPool locked + pool.Lock() + defer pool.Unlock() + + // remove from back to be able to repeat operation in case of error + for id := len(pool.dbs) - 1; id >= 0; id-- { + testDB := pool.dbs[id].TestDatabase + + if err := removeFunc(testDB); err != nil { + return err + } + + pool.dbs = pool.dbs[:len(pool.dbs)-1] + } + + return nil + // dbHashPool unlocked + // ! +} + func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { hash := templateDB.TemplateHash @@ -320,7 +345,14 @@ func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc return ErrUnknownHash } - return p.removeAllFromPool(pool, removeFunc) + if err := pool.removeAllFromPool(removeFunc); err != nil { + return err + } + + // all DBs have been removed, now remove the pool itself + delete(p.pools, hash) + + return nil // DBPool unlocked // ! 
} @@ -333,37 +365,6 @@ func (p *DBPool) enableworkerCleanUpDirtyDB(pool *dbHashPool) { }() } -func (p *DBPool) removeAllFromPool(pool *dbHashPool, removeFunc func(db.TestDatabase) error) error { - // close the channels, and reopen them when the operation is completed - close(pool.dirty) - close(pool.ready) - - // ! - // dbHashPool locked - pool.Lock() - defer pool.Unlock() - - // remove from back to be able to repeat operation in case of error - for id := len(pool.dbs) - 1; id >= 0; id-- { - testDB := pool.dbs[id].TestDatabase - - if err := removeFunc(testDB); err != nil { - return err - } - - pool.dbs = pool.dbs[:len(pool.dbs)-1] - } - - // all DBs removed, enable the worker again - pool.dirty = make(chan int, p.maxPoolSize) - pool.ready = make(chan int, p.maxPoolSize) - p.enableworkerCleanUpDirtyDB(pool) - - return nil - // dbHashPool unlocked - // ! -} - func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) error) error { // ! // DBPool locked @@ -371,7 +372,7 @@ func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) defer p.mutex.Unlock() for hash, pool := range p.pools { - if err := p.removeAllFromPool(pool, removeFunc); err != nil { + if err := pool.removeAllFromPool(removeFunc); err != nil { return err } diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index 05c8bb2..5dde061 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -180,3 +180,51 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { wg.Wait() p.Stop() } + +func TestPoolRemoveAll(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + hash2 := "h2" + templateDB1 := db.Database{ + TemplateHash: hash1, + } + templateDB2 := db.Database{ + TemplateHash: hash2, + } + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database) + return nil + } + removeFunc := func(testDB db.TestDatabase) error { + t.Log("remove ", testDB.Database) + 
return nil + } + + maxPoolSize := 6 + p := pool.NewDBPool(maxPoolSize) + + // add DBs sequentially + for i := 0; i < maxPoolSize; i++ { + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) + } + + // remove all + assert.NoError(t, p.RemoveAll(ctx, removeFunc)) + + // try to get + _, err := p.GetTestDatabase(ctx, hash1, 0) + assert.Error(t, err, pool.ErrTimeout) + _, err = p.GetTestDatabase(ctx, hash2, 0) + assert.Error(t, err, pool.ErrTimeout) + + // start using pool again + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) + testDB, err := p.GetTestDatabase(ctx, hash1, 0) + assert.NoError(t, err) + assert.Equal(t, 0, testDB.ID) + + p.Stop() +} From ac4ea86c41d61fd854b45e59495d838826892889 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 12:26:14 +0000 Subject: [PATCH 033/160] move down pool private functions --- pkg/pool/pool.go | 218 +++++++++++++++++++++++------------------------ 1 file changed, 109 insertions(+), 109 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 84f8545..dd09b80 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -51,57 +51,14 @@ type existingDB struct { type dbHashPool struct { dbs []existingDB - ready chan int // ID; initalized DBs according to a template, ready to pick them up - dirty chan int // ID; returned DBs, need to be initalized again to reuse them + ready chan int // ID of initalized DBs according to a template, ready to pick them up + dirty chan int // ID of returned DBs, need to be initalized again to reuse them recreateDB RecreateDBFunc templateDB db.Database sync.RWMutex } -func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Database) *dbHashPool { - return &dbHashPool{ - dbs: make([]existingDB, 0, maxPoolSize), - ready: make(chan int, maxPoolSize), - dirty: make(chan int, maxPoolSize), - recreateDB: recreateDB, - templateDB: templateDB, - } -} - -func (h *dbHashPool) 
workerCleanUpDirtyDB() { - ctx := context.Background() - templateName := h.templateDB.Config.Database - - for dirtyID := range h.dirty { - h.RLock() - if dirtyID >= len(h.dbs) { - // sanity check, should never happen - h.RUnlock() - continue - } - testDB := h.dbs[dirtyID] - h.RUnlock() - - if testDB.state != dbStateDirty { - continue - } - - if err := h.recreateDB(ctx, testDB.TestDatabase, templateName); err != nil { - // TODO anna: error handling - fmt.Printf("integresql: failed to clean up DB: %v\n", err) - continue - } - - h.Lock() - testDB.state = dbStateReady - h.dbs[dirtyID] = testDB - h.Unlock() - - h.ready <- testDB.ID - } -} - func (p *DBPool) Stop() { p.mutex.Lock() defer p.mutex.Unlock() @@ -167,68 +124,6 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. // ! } -func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { - // ! - // dbHashPool locked - pool.Lock() - defer pool.Unlock() - - // get index of a next test DB - its ID - index := len(pool.dbs) - if index == cap(pool.dbs) { - return db.TestDatabase{}, ErrPoolFull - } - - // initalization of a new DB - newTestDB := db.TestDatabase{ - Database: db.Database{ - TemplateHash: pool.templateDB.TemplateHash, - Config: pool.templateDB.Config, - }, - ID: index, - } - // db name has an ID in suffix - templateName := pool.templateDB.Config.Database - dbName := fmt.Sprintf("%s_%03d", templateName, index) - newTestDB.Database.Config.Database = dbName - - if err := pool.recreateDB(ctx, newTestDB, templateName); err != nil { - return db.TestDatabase{}, err - } - - // add new test DB to the pool - pool.dbs = append(pool.dbs, existingDB{state: dbStateReady, TestDatabase: newTestDB}) - - return newTestDB, nil - // dbHashPool unlocked - // ! -} - -func (pool *dbHashPool) removeAllFromPool(removeFunc func(db.TestDatabase) error) error { - // close the dirty channel to stop the worker - close(pool.dirty) - - // ! 
- // dbHashPool locked - pool.Lock() - defer pool.Unlock() - - // remove from back to be able to repeat operation in case of error - for id := len(pool.dbs) - 1; id >= 0; id-- { - testDB := pool.dbs[id].TestDatabase - - if err := removeFunc(testDB); err != nil { - return err - } - - pool.dbs = pool.dbs[:len(pool.dbs)-1] - } - - return nil - // dbHashPool unlocked - // ! -} - func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { hash := templateDB.TemplateHash @@ -345,7 +240,7 @@ func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc return ErrUnknownHash } - if err := pool.removeAllFromPool(removeFunc); err != nil { + if err := pool.removeAll(removeFunc); err != nil { return err } @@ -372,7 +267,7 @@ func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) defer p.mutex.Unlock() for hash, pool := range p.pools { - if err := pool.removeAllFromPool(removeFunc); err != nil { + if err := pool.removeAll(removeFunc); err != nil { return err } @@ -383,3 +278,108 @@ func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) // DBPool unlocked // ! 
} + +func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Database) *dbHashPool { + return &dbHashPool{ + dbs: make([]existingDB, 0, maxPoolSize), + ready: make(chan int, maxPoolSize), + dirty: make(chan int, maxPoolSize), + recreateDB: recreateDB, + templateDB: templateDB, + } +} + +func (pool *dbHashPool) workerCleanUpDirtyDB() { + ctx := context.Background() + templateName := pool.templateDB.Config.Database + + for dirtyID := range pool.dirty { + pool.RLock() + if dirtyID >= len(pool.dbs) { + // sanity check, should never happen + pool.RUnlock() + continue + } + testDB := pool.dbs[dirtyID] + pool.RUnlock() + + if testDB.state != dbStateDirty { + continue + } + + if err := pool.recreateDB(ctx, testDB.TestDatabase, templateName); err != nil { + // TODO anna: error handling + fmt.Printf("integresql: failed to clean up DB: %v\n", err) + continue + } + + pool.Lock() + testDB.state = dbStateReady + pool.dbs[dirtyID] = testDB + pool.Unlock() + + pool.ready <- testDB.ID + } +} + +func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { + // ! + // dbHashPool locked + pool.Lock() + defer pool.Unlock() + + // get index of a next test DB - its ID + index := len(pool.dbs) + if index == cap(pool.dbs) { + return db.TestDatabase{}, ErrPoolFull + } + + // initalization of a new DB + newTestDB := db.TestDatabase{ + Database: db.Database{ + TemplateHash: pool.templateDB.TemplateHash, + Config: pool.templateDB.Config, + }, + ID: index, + } + // db name has an ID in suffix + templateName := pool.templateDB.Config.Database + dbName := fmt.Sprintf("%s_%03d", templateName, index) + newTestDB.Database.Config.Database = dbName + + if err := pool.recreateDB(ctx, newTestDB, templateName); err != nil { + return db.TestDatabase{}, err + } + + // add new test DB to the pool + pool.dbs = append(pool.dbs, existingDB{state: dbStateReady, TestDatabase: newTestDB}) + + return newTestDB, nil + // dbHashPool unlocked + // ! 
+} + +func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error { + // close the dirty channel to stop the worker + close(pool.dirty) + + // ! + // dbHashPool locked + pool.Lock() + defer pool.Unlock() + + // remove from back to be able to repeat operation in case of error + for id := len(pool.dbs) - 1; id >= 0; id-- { + testDB := pool.dbs[id].TestDatabase + + if err := removeFunc(testDB); err != nil { + return err + } + + pool.dbs = pool.dbs[:len(pool.dbs)-1] + } + + return nil + // dbHashPool unlocked + // ! +} From 4d8f411ce30c5ec3ffd0f0aabf05610337db173d Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 12:40:27 +0000 Subject: [PATCH 034/160] rename template state ready to finalized --- pkg/manager/manager.go | 60 ++++++++++++++-------------------- pkg/templates/template.go | 6 ++-- pkg/templates/template_test.go | 12 +++---- 3 files changed, 33 insertions(+), 45 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 8375691..19dedba 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -27,18 +27,18 @@ type Manager struct { db *sql.DB wg sync.WaitGroup - closeChan chan bool - templatesX *templates.Collection - pool *pool.DBPool + closeChan chan bool + templates *templates.Collection + pool *pool.DBPool } func New(config ManagerConfig) *Manager { m := &Manager{ - config: config, - db: nil, - wg: sync.WaitGroup{}, - templatesX: templates.NewCollection(), - pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), + config: config, + db: nil, + wg: sync.WaitGroup{}, + templates: templates.NewCollection(), + pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), } if len(m.config.TestDatabaseOwner) == 0 { @@ -162,7 +162,7 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( Database: dbName, } - added, unlock := m.templatesX.Push(ctx, hash, templateConfig) + added, unlock := m.templates.Push(ctx, hash, templateConfig) // unlock template collection only after the 
template is actually initalized in the DB defer unlock() @@ -172,7 +172,7 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( reg := trace.StartRegion(ctx, "drop_and_create_db") if err := m.dropAndCreateDatabase(ctx, dbName, m.config.ManagerDatabaseConfig.Username, m.config.TemplateDatabaseTemplate); err != nil { - m.templatesX.RemoveUnsafe(ctx, hash) + m.templates.RemoveUnsafe(ctx, hash) return db.Database{}, err } @@ -193,7 +193,7 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro return ErrManagerNotReady } - template, found := m.templatesX.Pop(ctx, hash) + template, found := m.templates.Pop(ctx, hash) dbName := template.Config.Database if !found { @@ -222,7 +222,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db return db.Database{}, ErrManagerNotReady } - template, found := m.templatesX.Get(ctx, hash) + template, found := m.templates.Get(ctx, hash) if !found { return db.Database{}, ErrTemplateNotFound } @@ -230,7 +230,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db state := template.GetState(ctx) // early bailout if we are already ready (multiple calls) - if state == templates.TemplateStateReady { + if state == templates.TemplateStateFinalized { return template.Database, ErrTemplateAlreadyInitialized } @@ -239,7 +239,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db return db.Database{}, ErrTemplateDiscarded } - template.SetState(ctx, templates.TemplateStateReady) + template.SetState(ctx, templates.TemplateStateFinalized) m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) @@ -254,15 +254,15 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return db.TestDatabase{}, ErrManagerNotReady } - template, found := m.templatesX.Get(ctx, hash) + template, found := m.templates.Get(ctx, hash) if !found { return db.TestDatabase{}, 
ErrTemplateNotFound } // if the template has been discarded/not initalized yet, // no DB should be returned, even if already in the pool - state := template.WaitUntilReady(ctx, m.config.TestDatabaseWaitTimeout) - if state != templates.TemplateStateReady { + state := template.WaitUntilFinalized(ctx, m.config.TestDatabaseWaitTimeout) + if state != templates.TemplateStateFinalized { return db.TestDatabase{}, ErrInvalidTemplateState } @@ -285,12 +285,14 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e } // check if the template exists and is 'ready' - template, found := m.templatesX.Get(ctx, hash) + template, found := m.templates.Get(ctx, hash) if !found { return ErrTemplateNotFound } - if template.WaitUntilReady(ctx, m.config.TestDatabaseWaitTimeout) != templates.TemplateStateReady { + if template.WaitUntilFinalized(ctx, m.config.TestDatabaseWaitTimeout) != + templates.TemplateStateFinalized { + return ErrInvalidTemplateState } @@ -315,8 +317,8 @@ func (m *Manager) ResetAllTracking(ctx context.Context) error { return ErrManagerNotReady } - // remove all templates to disallow any new test DB creation - m.templatesX.RemoveAll(ctx) + // remove all templates to disallow any new test DB creation from existing templates + m.templates.RemoveAll(ctx) removeFunc := func(testDB db.TestDatabase) error { return m.dropDatabase(ctx, testDB.Config.Database) @@ -371,20 +373,10 @@ func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owne return m.createDatabase(ctx, dbName, owner, template) } -// cleanTestDatabase recreates a dirty DB obtained from the pool. -// It is created according to the given template. 
-func (m *Manager) cleanTestDatabase(ctx context.Context, testDB db.TestDatabase, templateDBName string) (db.TestDatabase, error) { - if err := m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateDBName); err != nil { - return db.TestDatabase{}, err - } - - return testDB, nil -} - // createTestDatabaseFromTemplate adds a new test database in the pool (increasing its size) basing on the given template. // It waits until the template is ready. func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template *templates.Template) error { - if template.WaitUntilReady(ctx, m.config.TestDatabaseWaitTimeout) != templates.TemplateStateReady { + if template.WaitUntilFinalized(ctx, m.config.TestDatabaseWaitTimeout) != templates.TemplateStateFinalized { // if the state changed in the meantime, return return ErrInvalidTemplateState } @@ -425,7 +417,3 @@ func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Templa func (m *Manager) makeTemplateDatabaseName(hash string) string { return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) } - -func (m *Manager) makeTestDatabasePrefix(hash string) string { - return fmt.Sprintf("%s_%s_%s_", m.config.DatabasePrefix, m.config.TestDatabasePrefix, hash) -} diff --git a/pkg/templates/template.go b/pkg/templates/template.go index 04b7931..d42b06f 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -14,7 +14,7 @@ type TemplateState int32 const ( TemplateStateInit TemplateState = iota TemplateStateDiscarded - TemplateStateReady + TemplateStateFinalized ) type Template struct { @@ -54,9 +54,9 @@ func (t *Template) SetState(ctx context.Context, newState TemplateState) { t.cond.Broadcast() } -func (t *Template) WaitUntilReady(ctx context.Context, timeout time.Duration) (exitState TemplateState) { +func (t *Template) WaitUntilFinalized(ctx context.Context, timeout time.Duration) (exitState TemplateState) { 
currentState := t.GetState(ctx) - if currentState == TemplateStateReady { + if currentState == TemplateStateFinalized { return } diff --git a/pkg/templates/template_test.go b/pkg/templates/template_test.go index 5802932..93a3158 100644 --- a/pkg/templates/template_test.go +++ b/pkg/templates/template_test.go @@ -19,9 +19,9 @@ func TestTemplateGetSetState(t *testing.T) { state := t1.GetState(ctx) assert.Equal(t, templates.TemplateStateInit, state) - t1.SetState(ctx, templates.TemplateStateReady) + t1.SetState(ctx, templates.TemplateStateFinalized) state = t1.GetState(ctx) - assert.Equal(t, templates.TemplateStateReady, state) + assert.Equal(t, templates.TemplateStateFinalized, state) t1.SetState(ctx, templates.TemplateStateDiscarded) state = t1.GetState(ctx) @@ -46,8 +46,8 @@ func TestTemplateWaitForReady(t *testing.T) { go func() { defer wg.Done() timeout := 1 * time.Second - state := t1.WaitUntilReady(ctx, timeout) - if state != templates.TemplateStateReady { + state := t1.WaitUntilFinalized(ctx, timeout) + if state != templates.TemplateStateFinalized { errsChan <- errors.New("expected ready, but is not") } }() @@ -59,7 +59,7 @@ func TestTemplateWaitForReady(t *testing.T) { go func() { defer wg.Done() timeout := 3 * time.Millisecond - state := t1.WaitUntilReady(ctx, timeout) + state := t1.WaitUntilFinalized(ctx, timeout) if state != templates.TemplateStateInit { errsChan <- errors.New("expected state init, but is not") } @@ -68,7 +68,7 @@ func TestTemplateWaitForReady(t *testing.T) { // now set state time.Sleep(5 * time.Millisecond) - t1.SetState(ctx, templates.TemplateStateReady) + t1.SetState(ctx, templates.TemplateStateFinalized) wg.Wait() close(errsChan) From 54191a45b18fdada1d0c9a76d344652ef0270119 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 13:25:11 +0000 Subject: [PATCH 035/160] fix disconnecting manager --- pkg/manager/manager.go | 43 +++++++++++++++++++++++------------------- pkg/manager/testing.go | 14 ++++++++++---- pkg/util/wait.go | 
20 ++++++++++++++++++++ 3 files changed, 54 insertions(+), 23 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 19dedba..a060c74 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -11,6 +11,7 @@ import ( "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/pool" "github.com/allaboutapps/integresql/pkg/templates" + "github.com/allaboutapps/integresql/pkg/util" "github.com/lib/pq" ) @@ -27,18 +28,19 @@ type Manager struct { db *sql.DB wg sync.WaitGroup - closeChan chan bool - templates *templates.Collection - pool *pool.DBPool + disconnectChan chan bool + templates *templates.Collection + pool *pool.DBPool } func New(config ManagerConfig) *Manager { m := &Manager{ - config: config, - db: nil, - wg: sync.WaitGroup{}, - templates: templates.NewCollection(), - pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), + config: config, + db: nil, + wg: sync.WaitGroup{}, + disconnectChan: make(chan bool), + templates: templates.NewCollection(), + pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), } if len(m.config.TestDatabaseOwner) == 0 { @@ -84,17 +86,21 @@ func (m *Manager) Disconnect(ctx context.Context, ignoreCloseError bool) error { return errors.New("manager is not connected") } - m.closeChan <- true - - c := make(chan struct{}) + // signal stop to background routines go func() { - defer close(c) - m.wg.Wait() + m.disconnectChan <- true }() - select { - case <-c: - case <-ctx.Done(): + _, err := util.WaitWithCancellableCtx(ctx, func(context.Context) (bool, error) { + m.wg.Wait() + return true, nil + }) + + if err != nil { + // we didn't manage to stop on time background routines + // but we will continue and close the DB connection + // TODO anna: error handling + // fmt.Println("integresql: timeout when stopping background tasks") } if err := m.db.Close(); err != nil && !ignoreCloseError { @@ -392,11 +398,11 @@ func (m *Manager) createTestDatabaseFromTemplate(ctx 
context.Context, template * func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Template, count int) { ctx, cancel := context.WithCancel(context.Background()) + defer cancel() m.wg.Add(1) go func() { defer m.wg.Done() - defer cancel() for i := 0; i < count; i++ { if err := m.createTestDatabaseFromTemplate(ctx, template); err != nil { @@ -407,9 +413,8 @@ func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Templa }() select { - case <-m.closeChan: + case <-m.disconnectChan: // manager was requested to stop - cancel() case <-ctx.Done(): } } diff --git a/pkg/manager/testing.go b/pkg/manager/testing.go index b79e0c8..6dde462 100644 --- a/pkg/manager/testing.go +++ b/pkg/manager/testing.go @@ -8,6 +8,7 @@ import ( "time" "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/util" ) func testManagerFromEnv() *Manager { @@ -23,13 +24,18 @@ func testManagerFromEnv() *Manager { func disconnectManager(t *testing.T, m *Manager) { t.Helper() + timeout := 1 * time.Second + ctx := context.Background() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() + _, err := util.WaitWithTimeout(ctx, timeout, func(ctx context.Context) (bool, error) { + err := m.Disconnect(ctx, true) + return false, err + }) - if err := m.Disconnect(ctx, true); err != nil { - t.Logf("received error while disconnecting manager: %v", err) + if err != nil { + t.Errorf("received error while disconnecting manager: %v", err) } + } func initTemplateDB(ctx context.Context, errs chan<- error, m *Manager) { diff --git a/pkg/util/wait.go b/pkg/util/wait.go index 1255e91..b4747ea 100644 --- a/pkg/util/wait.go +++ b/pkg/util/wait.go @@ -31,3 +31,23 @@ func WaitWithTimeout[T any](ctx context.Context, timeout time.Duration, operatio return empty, ErrTimeout } } + +func WaitWithCancellableCtx[T any](ctx context.Context, operation func(context.Context) (T, error)) (T, error) { + + resChan := make(chan 
T, 1) + g, cctx := errgroup.WithContext(ctx) + + g.Go(func() error { + res, err := operation(cctx) + resChan <- res + return err + }) + + select { + case res := <-resChan: + return res, g.Wait() + case <-ctx.Done(): + var empty T + return empty, ErrTimeout + } +} From 6ccb806b1b2378c367328e199610d4829b9163ea Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 21 Jun 2023 13:35:22 +0000 Subject: [PATCH 036/160] use context to cancel manager tasks --- pkg/manager/manager.go | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index a060c74..a87af37 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -28,19 +28,21 @@ type Manager struct { db *sql.DB wg sync.WaitGroup - disconnectChan chan bool - templates *templates.Collection - pool *pool.DBPool + templates *templates.Collection + pool *pool.DBPool + + connectionCtx context.Context + cancelConnectionCtx func() } func New(config ManagerConfig) *Manager { m := &Manager{ - config: config, - db: nil, - wg: sync.WaitGroup{}, - disconnectChan: make(chan bool), - templates: templates.NewCollection(), - pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), + config: config, + db: nil, + wg: sync.WaitGroup{}, + templates: templates.NewCollection(), + pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), + connectionCtx: context.TODO(), } if len(m.config.TestDatabaseOwner) == 0 { @@ -78,6 +80,12 @@ func (m *Manager) Connect(ctx context.Context) error { m.db = db + // set cancellable connection context + // used to stop background tasks + ctx, cancel := context.WithCancel(context.Background()) + m.connectionCtx = ctx + m.cancelConnectionCtx = cancel + return nil } @@ -87,9 +95,8 @@ func (m *Manager) Disconnect(ctx context.Context, ignoreCloseError bool) error { } // signal stop to background routines - go func() { - m.disconnectChan <- true - }() + m.cancelConnectionCtx() + m.connectionCtx = context.TODO() _, 
err := util.WaitWithCancellableCtx(ctx, func(context.Context) (bool, error) { m.wg.Wait() @@ -397,8 +404,7 @@ func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template * // Adds new test databases for a template, intended to be run asynchronously from other operations in a separate goroutine, using the manager's WaitGroup to synchronize for shutdown. func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Template, count int) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := m.connectionCtx m.wg.Add(1) go func() { @@ -412,11 +418,6 @@ func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Templa } }() - select { - case <-m.disconnectChan: - // manager was requested to stop - case <-ctx.Done(): - } } func (m *Manager) makeTemplateDatabaseName(hash string) string { From b4d8db7f5cd29c850e7f9ae3e19b6448ea76dbcc Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 06:18:32 +0000 Subject: [PATCH 037/160] move manager test code to _test package --- pkg/manager/{testing.go => helpers_test.go} | 22 +++++++---- pkg/manager/manager.go | 7 ++-- pkg/manager/manager_test.go | 41 +++++++++++---------- 3 files changed, 40 insertions(+), 30 deletions(-) rename pkg/manager/{testing.go => helpers_test.go} (87%) diff --git a/pkg/manager/testing.go b/pkg/manager/helpers_test.go similarity index 87% rename from pkg/manager/testing.go rename to pkg/manager/helpers_test.go index 6dde462..bf1106f 100644 --- a/pkg/manager/testing.go +++ b/pkg/manager/helpers_test.go @@ -1,4 +1,4 @@ -package manager +package manager_test import ( "context" @@ -8,20 +8,28 @@ import ( "time" "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/manager" "github.com/allaboutapps/integresql/pkg/util" ) -func testManagerFromEnv() *Manager { - conf := DefaultManagerConfigFromEnv() +func testManagerFromEnv() *manager.Manager { + conf := 
manager.DefaultManagerConfigFromEnv() conf.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently - return New(conf) + m, _ := manager.New(conf) + return m +} + +func testManagerFromEnvWithConfig() (*manager.Manager, manager.ManagerConfig) { + conf := manager.DefaultManagerConfigFromEnv() + conf.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently + return manager.New(conf) } // test helpers should never return errors, but are passed the *testing.T instance and fail if needed. It seems to be recommended helper functions are moved to a testing.go file... // https://medium.com/@povilasve/go-advanced-tips-tricks-a872503ac859 // https://about.sourcegraph.com/go/advanced-testing-in-go -func disconnectManager(t *testing.T, m *Manager) { +func disconnectManager(t *testing.T, m *manager.Manager) { t.Helper() timeout := 1 * time.Second @@ -38,7 +46,7 @@ func disconnectManager(t *testing.T, m *Manager) { } -func initTemplateDB(ctx context.Context, errs chan<- error, m *Manager) { +func initTemplateDB(ctx context.Context, errs chan<- error, m *manager.Manager) { template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash") if err != nil { @@ -146,7 +154,7 @@ func verifyTestDB(t *testing.T, test db.TestDatabase) { } } -func getTestDB(ctx context.Context, errs chan<- error, m *Manager) { +func getTestDB(ctx context.Context, errs chan<- error, m *manager.Manager) { _, err := m.GetTestDatabase(context.Background(), "hashinghash") errs <- err diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index a87af37..e9d3db2 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -35,7 +35,7 @@ type Manager struct { cancelConnectionCtx func() } -func New(config ManagerConfig) *Manager { +func New(config ManagerConfig) (*Manager, ManagerConfig) { m := &Manager{ config: config, db: nil, @@ -57,11 +57,12 @@ func New(config ManagerConfig) *Manager { 
m.config.TestDatabaseInitialPoolSize = m.config.TestDatabaseMaxPoolSize } - return m + return m, m.config } func DefaultFromEnv() *Manager { - return New(DefaultManagerConfigFromEnv()) + m, _ := New(DefaultManagerConfigFromEnv()) + return m } func (m *Manager) Connect(ctx context.Context) error { diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 8fe5ba9..71c78c8 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -1,4 +1,4 @@ -package manager +package manager_test import ( "context" @@ -9,6 +9,7 @@ import ( "time" "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/manager" "github.com/lib/pq" "github.com/stretchr/testify/assert" ) @@ -31,7 +32,7 @@ func TestManagerConnect(t *testing.T) { func TestManagerConnectError(t *testing.T) { t.Parallel() - m := New(ManagerConfig{ + m, _ := manager.New(manager.ManagerConfig{ ManagerDatabaseConfig: db.DatabaseConfig{ Host: "definitelydoesnotexist", Port: 2345, @@ -169,7 +170,7 @@ func TestManagerInitializeTemplateDatabaseConcurrently(t *testing.T) { if err == nil { success++ } else { - if err == ErrTemplateAlreadyInitialized { + if err == manager.ErrTemplateAlreadyInitialized { failed++ } else { errored++ @@ -207,27 +208,27 @@ func TestManagerFinalizeTemplateDatabase(t *testing.T) { populateTemplateDB(t, template) - // template, err = m.FinalizeTemplateDatabase(ctx, hash) - // if err != nil { - // t.Fatalf("failed to finalize template database: %v", err) - // } + template, err = m.FinalizeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } - // if !template.Ready(ctx) { - // t.Error("template database is flagged as not ready") - // } + if template.TemplateHash != hash { + t.Error("invalid template hash") + } } func TestManagerFinalizeUntrackedTemplateDatabaseIsNotPossible(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + m, config := 
testManagerFromEnvWithConfig() if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } defer disconnectManager(t, m) - db, err := sql.Open("postgres", m.config.ManagerDatabaseConfig.ConnectionString()) + db, err := sql.Open("postgres", config.ManagerDatabaseConfig.ConnectionString()) if err != nil { t.Fatalf("failed to open connection to manager database: %v", err) } @@ -238,12 +239,12 @@ func TestManagerFinalizeUntrackedTemplateDatabaseIsNotPossible(t *testing.T) { } hash := "hashinghash" - dbName := fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) + dbName := fmt.Sprintf("%s_%s_%s", config.DatabasePrefix, config.TemplateDatabasePrefix, hash) if _, err := db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil { t.Fatalf("failed to manually drop template database %q: %v", dbName, err) } - if _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(m.config.ManagerDatabaseConfig.Username), pq.QuoteIdentifier(m.config.TemplateDatabaseTemplate))); err != nil { + if _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(config.ManagerDatabaseConfig.Username), pq.QuoteIdentifier(config.TemplateDatabaseTemplate))); err != nil { t.Fatalf("failed to manually create template database %q: %v", dbName, err) } @@ -605,11 +606,11 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { ctx := context.Background() - cfg := DefaultManagerConfigFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseMaxPoolSize = 3 cfg.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently - m := New(cfg) + m, _ := manager.New(cfg) if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing 
manager failed: %v", err) } @@ -708,7 +709,7 @@ func TestManagerReturnTestDatabase(t *testing.T) { func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + m, config := testManagerFromEnvWithConfig() if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -728,7 +729,7 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { t.Fatalf("failed to finalize template database: %v", err) } - db, err := sql.Open("postgres", m.config.ManagerDatabaseConfig.ConnectionString()) + db, err := sql.Open("postgres", config.ManagerDatabaseConfig.ConnectionString()) if err != nil { t.Fatalf("failed to open connection to manager database: %v", err) } @@ -739,12 +740,12 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { } id := 321 - dbName := fmt.Sprintf("%s_%s_%s_%d", m.config.DatabasePrefix, m.config.TestDatabasePrefix, hash, id) + dbName := fmt.Sprintf("%s_%s_%s_%d", config.DatabasePrefix, config.TestDatabasePrefix, hash, id) if _, err := db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil { t.Fatalf("failed to manually drop template database %q: %v", dbName, err) } - if _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(m.config.ManagerDatabaseConfig.Username), pq.QuoteIdentifier(template.Config.Database))); err != nil { + if _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(config.ManagerDatabaseConfig.Username), pq.QuoteIdentifier(template.Config.Database))); err != nil { t.Fatalf("failed to manually create template database %q: %v", dbName, err) } From ac341f3954223bd1aa35d652b1ae7ca56a1aa382 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 06:54:53 +0000 Subject: [PATCH 038/160] fix template WaitUntilFinalized --- 
pkg/templates/template.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/templates/template.go b/pkg/templates/template.go index d42b06f..076b737 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -57,10 +57,10 @@ func (t *Template) SetState(ctx context.Context, newState TemplateState) { func (t *Template) WaitUntilFinalized(ctx context.Context, timeout time.Duration) (exitState TemplateState) { currentState := t.GetState(ctx) if currentState == TemplateStateFinalized { - return + return currentState } - state, err := util.WaitWithTimeout(ctx, timeout, func(context.Context) (TemplateState, error) { + newState, err := util.WaitWithTimeout(ctx, timeout, func(context.Context) (TemplateState, error) { t.cond.L.Lock() defer t.cond.L.Unlock() t.cond.Wait() @@ -71,5 +71,5 @@ func (t *Template) WaitUntilFinalized(ctx context.Context, timeout time.Duration if err != nil { return currentState } - return state + return newState } From a487bfcd52db83e4e5050a2b40e9fdc83c6fb130 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 06:55:38 +0000 Subject: [PATCH 039/160] init pool when finalizing a template --- pkg/manager/manager.go | 9 +++++++-- pkg/pool/pool.go | 21 ++++++++++++++------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index e9d3db2..5d29703 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -253,8 +253,13 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db return db.Database{}, ErrTemplateDiscarded } - template.SetState(ctx, templates.TemplateStateFinalized) + // Init a pool with this hash + initDBFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, template.Config.Database) + } + m.pool.InitHashPool(ctx, template.Database, initDBFunc) + template.SetState(ctx, 
templates.TemplateStateFinalized) m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) return template.Database, nil @@ -414,7 +419,7 @@ func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Templa for i := 0; i < count; i++ { if err := m.createTestDatabaseFromTemplate(ctx, template); err != nil { // TODO anna: error handling - fmt.Printf("integresql: failed to initialize DB: %v\n", err) + // fmt.Printf("integresql: failed to initialize DB: %v\n", err) } } }() diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index dd09b80..138853c 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -59,6 +59,19 @@ type dbHashPool struct { sync.RWMutex } +func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { + p.mutex.Lock() + defer p.mutex.Unlock() + + // create a new dbHashPool + pool := newDBHashPool(p.maxPoolSize, initDBFunc, templateDB) + // and start the cleaning worker + p.enableworkerCleanUpDirtyDB(pool) + + // pool is ready + p.pools[pool.templateDB.TemplateHash] = pool +} + func (p *DBPool) Stop() { p.mutex.Lock() defer p.mutex.Unlock() @@ -133,13 +146,7 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in pool := p.pools[hash] if pool == nil { - // create a new dbHashPool - pool = newDBHashPool(p.maxPoolSize, initFunc, templateDB) - // and start the cleaning worker - p.enableworkerCleanUpDirtyDB(pool) - - // pool is ready - p.pools[hash] = pool + p.InitHashPool(ctx, templateDB, initFunc) } p.mutex.Unlock() From 874eff9f009c9b7bdd82bc83aa7311a78a4df457 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 08:14:34 +0000 Subject: [PATCH 040/160] fix mananger tests --- pkg/manager/helpers_test.go | 5 ++ pkg/manager/manager_config.go | 2 +- pkg/manager/manager_test.go | 139 ++++++++++++++++++++++++++-------- 3 files changed, 112 insertions(+), 34 deletions(-) diff --git a/pkg/manager/helpers_test.go b/pkg/manager/helpers_test.go 
index bf1106f..a93c0de 100644 --- a/pkg/manager/helpers_test.go +++ b/pkg/manager/helpers_test.go @@ -25,6 +25,11 @@ func testManagerFromEnvWithConfig() (*manager.Manager, manager.ManagerConfig) { return manager.New(conf) } +func testManagerWithConfig(conf manager.ManagerConfig) (*manager.Manager, manager.ManagerConfig) { + conf.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently + return manager.New(conf) +} + // test helpers should never return errors, but are passed the *testing.T instance and fail if needed. It seems to be recommended helper functions are moved to a testing.go file... // https://medium.com/@povilasve/go-advanced-tips-tricks-a872503ac859 // https://about.sourcegraph.com/go/advanced-testing-in-go diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 5eb7ca5..fc47993 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -55,6 +55,6 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), - TestDatabaseWaitTimeout: time.Second * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_WAIT_TIMEOUT_SEC", 10)), + TestDatabaseWaitTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_WAIT_TIMEOUT_MS", 1000)), } } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 71c78c8..5deec0e 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -302,37 +302,39 @@ func TestManagerGetTestDatabase(t *testing.T) { verifyTestDB(t, test) } -// disabled as we were running into timing issues -// func TestManagerGetTestDatabaseTimeout(t *testing.T) { -// ctx := context.Background() +func 
TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { + ctx := context.Background() -// m := testManagerFromEnv() -// if err := m.Initialize(ctx); err != nil { -// t.Fatalf("initializing manager failed: %v", err) -// } + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TestDatabaseWaitTimeout = 10 * time.Nanosecond + // no db created initally in the background + cfg.TestDatabaseInitialPoolSize = 0 + m, _ := testManagerWithConfig(cfg) -// defer disconnectManager(t, m) + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } -// hash := "hashinghash" + defer disconnectManager(t, m) -// template, err := m.InitializeTemplateDatabase(ctx, hash) -// if err != nil { -// t.Fatalf("failed to initialize template database: %v", err) -// } + hash := "hashinghash" -// populateTemplateDB(t, template) + template, err := m.InitializeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) + } -// if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { -// t.Fatalf("failed to finalize template database: %v", err) -// } + populateTemplateDB(t, template) -// ctxt, cancel := context.WithTimeout(ctx, 10*time.Nanosecond) -// defer cancel() + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } -// if _, err := m.GetTestDatabase(ctxt, hash); err != context.DeadlineExceeded { -// t.Fatalf("received unexpected error, got %v, want %v", err, context.DeadlineExceeded) -// } -// } + // get should succeed because a test DB is created on demand + testDB, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + assert.Equal(t, 0, testDB.ID) +} func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { ctx := context.Background() @@ -607,10 +609,10 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() + 
cfg.TestDatabaseInitialPoolSize = 3 cfg.TestDatabaseMaxPoolSize = 3 - cfg.DatabasePrefix = "pgtestpool" // ensure we don't overlap with other pools running concurrently + m, _ := testManagerWithConfig(cfg) - m, _ := manager.New(cfg) if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -630,8 +632,49 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { t.Fatalf("failed to finalize template database: %v", err) } - seenIDs := map[int]bool{} + // request many more databases than initally added for i := 0; i <= cfg.TestDatabaseMaxPoolSize*3; i++ { + test, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + assert.NotEmpty(t, test) + + // return testDB after usage + assert.NoError(t, m.ReturnTestDatabase(ctx, hash, test.ID)) + } +} + +func TestManagerGetTestDatabaseExtendingPool(t *testing.T) { + ctx := context.Background() + + cfg := manager.DefaultManagerConfigFromEnv() + // there is just 1 database initially + cfg.TestDatabaseInitialPoolSize = 1 + // should extend up to 10 on demand + cfg.TestDatabaseMaxPoolSize = 10 + cfg.TestDatabaseWaitTimeout = 10 * time.Nanosecond + m, _ := testManagerWithConfig(cfg) + + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } + + defer disconnectManager(t, m) + + hash := "hashinghash" + + template, err := m.InitializeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) + } + + populateTemplateDB(t, template) + + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + + seenIDs := map[int]bool{} + for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { test, err := m.GetTestDatabase(ctx, hash) if err != nil { t.Fatalf("failed to get test database: %v", err) @@ -642,7 +685,13 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { } seenIDs[test.ID] = true + + // don't return } + + // should not be able 
to extend beyond the limit + _, err = m.GetTestDatabase(ctx, hash) + assert.Error(t, err) } func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { @@ -665,7 +714,13 @@ func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { func TestManagerReturnTestDatabase(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + // there is just 1 database initially + cfg.TestDatabaseInitialPoolSize = 1 + // can be extended, but should first reuse existing + cfg.TestDatabaseMaxPoolSize = 3 + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -755,6 +810,8 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { } func TestManagerReturnUnknownTemplateDatabase(t *testing.T) { + t.Skip("disabled: outside of pool functionality") + ctx := context.Background() m := testManagerFromEnv() @@ -803,13 +860,25 @@ func TestManagerMultiFinalize(t *testing.T) { populateTemplateDB(t, template) - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } + go func() { + t := t + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + }() + go func() { + t := t + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + }() + go func() { + t := t + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + }() - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize a second time template database (bailout already ready): %v", err) - } } func TestManagerClearTrackedTestDatabases(t *testing.T) { @@ -842,6 +911,10 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { originalID := test.ID + // 
clear it twice - because why not + if err := m.ClearTrackedTestDatabases(ctx, hash); err != nil { + t.Fatalf("failed to clear tracked test databases: %v", err) + } if err := m.ClearTrackedTestDatabases(ctx, hash); err != nil { t.Fatalf("failed to clear tracked test databases: %v", err) } From efc70ab77bedc2a109b942bb57d146643682761d Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 08:52:56 +0000 Subject: [PATCH 041/160] stop pool worker with msg and assign 'inUse' state when extending pool --- pkg/pool/pool.go | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 138853c..3bf84a7 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -26,6 +26,8 @@ const ( dbStateDirty = iota ) +const stopWorkerMessage int = -1 + type DBPool struct { pools map[string]*dbHashPool // map[hash] mutex sync.RWMutex @@ -187,6 +189,11 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes return db.TestDatabase{}, err } + // because we return it right away, we treat it as 'inUse' + pool.Lock() + pool.dbs[newTestDB.ID].state = dbStateInUse + pool.Unlock() + return newTestDB, nil } @@ -297,12 +304,17 @@ func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Dat } func (pool *dbHashPool) workerCleanUpDirtyDB() { + ctx := context.Background() templateName := pool.templateDB.Config.Database for dirtyID := range pool.dirty { pool.RLock() - if dirtyID >= len(pool.dbs) { + if dirtyID == stopWorkerMessage { + break + } + + if dirtyID < 0 || dirtyID >= len(pool.dbs) { // sanity check, should never happen pool.RUnlock() continue @@ -367,14 +379,19 @@ func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { } func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error { - // close the dirty channel to stop the worker - close(pool.dirty) // ! 
// dbHashPool locked pool.Lock() defer pool.Unlock() + if len(pool.dbs) == 0 { + return nil + } + + // stop the worker + pool.dirty <- stopWorkerMessage + // remove from back to be able to repeat operation in case of error for id := len(pool.dbs) - 1; id >= 0; id-- { testDB := pool.dbs[id].TestDatabase @@ -386,6 +403,11 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error pool.dbs = pool.dbs[:len(pool.dbs)-1] } + // close all only if removal of all succeeded + pool.dbs = nil + close(pool.dirty) + close(pool.ready) + return nil // dbHashPool unlocked // ! From e7df27496e99401041ff91c9807ac95c2fd16dd9 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 08:54:29 +0000 Subject: [PATCH 042/160] use previous api when returning template --- pkg/db/database.go | 10 ++++++++++ pkg/db/test_database.go | 7 ------- pkg/manager/helpers_test.go | 2 +- pkg/manager/manager.go | 28 +++++++++++++++------------- tests/integresql_test.go | 3 ++- 5 files changed, 28 insertions(+), 22 deletions(-) delete mode 100644 pkg/db/test_database.go diff --git a/pkg/db/database.go b/pkg/db/database.go index 268eefa..9cc0df9 100644 --- a/pkg/db/database.go +++ b/pkg/db/database.go @@ -4,3 +4,13 @@ type Database struct { TemplateHash string `json:"templateHash"` Config DatabaseConfig `json:"config"` } + +type TestDatabase struct { + Database `json:"database"` + + ID int `json:"id"` +} + +type TemplateDatabase struct { + Database `json:"database"` +} diff --git a/pkg/db/test_database.go b/pkg/db/test_database.go deleted file mode 100644 index 0e71c7f..0000000 --- a/pkg/db/test_database.go +++ /dev/null @@ -1,7 +0,0 @@ -package db - -type TestDatabase struct { - Database `json:"database"` - - ID int `json:"id"` -} diff --git a/pkg/manager/helpers_test.go b/pkg/manager/helpers_test.go index a93c0de..27c5048 100644 --- a/pkg/manager/helpers_test.go +++ b/pkg/manager/helpers_test.go @@ -67,7 +67,7 @@ func initTemplateDB(ctx context.Context, errs chan<- error, m 
*manager.Manager) errs <- nil } -func populateTemplateDB(t *testing.T, template db.Database) { +func populateTemplateDB(t *testing.T, template db.TemplateDatabase) { t.Helper() db, err := sql.Open("postgres", template.Config.ConnectionString()) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 5d29703..707b119 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -159,12 +159,12 @@ func (m *Manager) Initialize(ctx context.Context) error { return nil } -func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.Database, error) { +func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "initialize_template_db") defer task.End() if !m.Ready() { - return db.Database{}, ErrManagerNotReady + return db.TemplateDatabase{}, ErrManagerNotReady } dbName := m.makeTemplateDatabaseName(hash) @@ -181,20 +181,22 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( defer unlock() if !added { - return db.Database{}, ErrTemplateAlreadyInitialized + return db.TemplateDatabase{}, ErrTemplateAlreadyInitialized } reg := trace.StartRegion(ctx, "drop_and_create_db") if err := m.dropAndCreateDatabase(ctx, dbName, m.config.ManagerDatabaseConfig.Username, m.config.TemplateDatabaseTemplate); err != nil { m.templates.RemoveUnsafe(ctx, hash) - return db.Database{}, err + return db.TemplateDatabase{}, err } reg.End() - return db.Database{ - TemplateHash: hash, - Config: templateConfig, + return db.TemplateDatabase{ + Database: db.Database{ + TemplateHash: hash, + Config: templateConfig, + }, }, nil } @@ -228,29 +230,29 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro return m.dropDatabase(ctx, dbName) } -func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db.Database, error) { +func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) 
(db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "finalize_template_db") defer task.End() if !m.Ready() { - return db.Database{}, ErrManagerNotReady + return db.TemplateDatabase{}, ErrManagerNotReady } template, found := m.templates.Get(ctx, hash) if !found { - return db.Database{}, ErrTemplateNotFound + return db.TemplateDatabase{}, ErrTemplateNotFound } state := template.GetState(ctx) // early bailout if we are already ready (multiple calls) if state == templates.TemplateStateFinalized { - return template.Database, ErrTemplateAlreadyInitialized + return db.TemplateDatabase{Database: template.Database}, ErrTemplateAlreadyInitialized } // Disallow transition from discarded to ready if state == templates.TemplateStateDiscarded { - return db.Database{}, ErrTemplateDiscarded + return db.TemplateDatabase{}, ErrTemplateDiscarded } // Init a pool with this hash @@ -262,7 +264,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db template.SetState(ctx, templates.TemplateStateFinalized) m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) - return template.Database, nil + return db.TemplateDatabase{Database: template.Database}, nil } func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) { diff --git a/tests/integresql_test.go b/tests/integresql_test.go index 6cb9c2d..2b20446 100644 --- a/tests/integresql_test.go +++ b/tests/integresql_test.go @@ -62,6 +62,7 @@ func BenchmarkGetDatabaseFromNewTemplate(b *testing.B) { require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) } }) + } func BenchmarkGetDatabaseFromExistingTemplate(b *testing.B) { @@ -109,8 +110,8 @@ func BenchmarkGetDatabaseFromExistingTemplate(b *testing.B) { require.NoError(b, client.ReturnTestDatabase(ctx, newTemplateHash, dbConfig.ID)) } }) - require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) + b.Cleanup(func() { require.NoError(b, client.DiscardTemplate(ctx, 
newTemplateHash)) }) } // nolint: deadcode From a2fe093c5f3103910336e326f53cfbc0d36231a8 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 09:49:05 +0000 Subject: [PATCH 043/160] fix deadlock when init pool --- pkg/pool/pool.go | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 3bf84a7..1834bf7 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -65,6 +65,10 @@ func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initD p.mutex.Lock() defer p.mutex.Unlock() + _ = p.initHashPool(ctx, templateDB, initDBFunc) +} + +func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { // create a new dbHashPool pool := newDBHashPool(p.maxPoolSize, initDBFunc, templateDB) // and start the cleaning worker @@ -72,6 +76,8 @@ func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initD // pool is ready p.pools[pool.templateDB.TemplateHash] = pool + + return pool } func (p *DBPool) Stop() { @@ -148,7 +154,7 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in pool := p.pools[hash] if pool == nil { - p.InitHashPool(ctx, templateDB, initFunc) + pool = p.initHashPool(ctx, templateDB, initFunc) } p.mutex.Unlock() @@ -380,18 +386,14 @@ func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error { + // stop the worker + pool.dirty <- stopWorkerMessage + // ! 
// dbHashPool locked pool.Lock() defer pool.Unlock() - if len(pool.dbs) == 0 { - return nil - } - - // stop the worker - pool.dirty <- stopWorkerMessage - // remove from back to be able to repeat operation in case of error for id := len(pool.dbs) - 1; id >= 0; id-- { testDB := pool.dbs[id].TestDatabase @@ -400,7 +402,9 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error return err } - pool.dbs = pool.dbs[:len(pool.dbs)-1] + if len(pool.dbs) > 1 { + pool.dbs = pool.dbs[:len(pool.dbs)-1] + } } // close all only if removal of all succeeded From 06aeab98c38f92163c7ec5fc35db1671fffc41b9 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 10:29:37 +0000 Subject: [PATCH 044/160] vs debug config --- .vscode/launch.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index c710274..27dace7 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -9,7 +9,7 @@ "type": "go", "request": "launch", "mode": "auto", - "program": "${fileDirname}/cmd/server", + "program": "${workspaceFolder}/cmd/server", "env": {}, "args": [] } From fb28311c065df1854b4b6e9f9c2ddc9a61141660 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 10:30:22 +0000 Subject: [PATCH 045/160] remove all from pool before discarding template --- pkg/manager/manager.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 707b119..4b9a401 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -209,6 +209,13 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro return ErrManagerNotReady } + // first remove all DB with this hash + if err := m.pool.RemoveAllWithHash(ctx, hash, func(testDB db.TestDatabase) error { + return m.dropDatabase(ctx, testDB.Database.Config.Database) + }); err != nil && !errors.Is(err, pool.ErrUnknownHash) { + return err + } + template, found := m.templates.Pop(ctx, hash) dbName := 
template.Config.Database From 1ffcc14706241a96d780fa2cc4cde0391d405115 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 10:31:03 +0000 Subject: [PATCH 046/160] use RLock for get --- pkg/pool/pool.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 1834bf7..6168641 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -97,17 +97,17 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. // ! // DBPool locked - p.mutex.Lock() + p.mutex.RLock() pool := p.pools[hash] if pool == nil { // no such pool - p.mutex.Unlock() + p.mutex.RUnlock() err = ErrUnknownHash return } - p.mutex.Unlock() + p.mutex.RUnlock() // DBPool unlocked // ! @@ -394,6 +394,10 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error pool.Lock() defer pool.Unlock() + if len(pool.dbs) == 0 { + return nil + } + // remove from back to be able to repeat operation in case of error for id := len(pool.dbs) - 1; id >= 0; id-- { testDB := pool.dbs[id].TestDatabase From fa98c1c30b517947cf85cbe7b9203fcd8628fc6e Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 10:56:28 +0000 Subject: [PATCH 047/160] move wg to dbHashPool --- pkg/pool/pool.go | 33 +++++++++++++++++---------------- pkg/pool/pool_test.go | 8 ++++---- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 6168641..062967b 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -15,7 +15,7 @@ var ( ErrPoolFull = errors.New("database pool is full") ErrInvalidState = errors.New("database state is not valid for this operation") ErrInvalidIndex = errors.New("invalid db.Database index (id)") - ErrTimeout = errors.New("timeout on waiting for ready db") + ErrTimeout = errors.New("timeout when waiting for ready db") ) type dbState int @@ -33,7 +33,6 @@ type DBPool struct { mutex sync.RWMutex maxPoolSize int - wg sync.WaitGroup } func NewDBPool(maxPoolSize 
int) *DBPool { @@ -59,6 +58,7 @@ type dbHashPool struct { recreateDB RecreateDBFunc templateDB db.Database sync.RWMutex + wg sync.WaitGroup } func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { @@ -72,7 +72,7 @@ func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initD // create a new dbHashPool pool := newDBHashPool(p.maxPoolSize, initDBFunc, templateDB) // and start the cleaning worker - p.enableworkerCleanUpDirtyDB(pool) + pool.enableWorker() // pool is ready p.pools[pool.templateDB.TemplateHash] = pool @@ -80,17 +80,16 @@ func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initD return pool } +// Stop is used to stop all background workers func (p *DBPool) Stop() { p.mutex.Lock() defer p.mutex.Unlock() for _, pool := range p.pools { close(pool.dirty) + pool.wg.Wait() } - p.wg.Wait() - for _, pool := range p.pools { - close(pool.ready) - } + } func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { @@ -272,14 +271,6 @@ func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc // ! } -func (p *DBPool) enableworkerCleanUpDirtyDB(pool *dbHashPool) { - p.wg.Add(1) - go func() { - defer p.wg.Done() - pool.workerCleanUpDirtyDB() - }() -} - func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) error) error { // ! 
// DBPool locked @@ -309,17 +300,25 @@ func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Dat } } +func (pool *dbHashPool) enableWorker() { + pool.wg.Add(1) + go func() { + defer pool.wg.Done() + pool.workerCleanUpDirtyDB() + }() +} + func (pool *dbHashPool) workerCleanUpDirtyDB() { ctx := context.Background() templateName := pool.templateDB.Config.Database for dirtyID := range pool.dirty { - pool.RLock() if dirtyID == stopWorkerMessage { break } + pool.RLock() if dirtyID < 0 || dirtyID >= len(pool.dbs) { // sanity check, should never happen pool.RUnlock() @@ -387,7 +386,9 @@ func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error { // stop the worker + // we don't close here because if the remove operation fails, we want to be able to repeat it pool.dirty <- stopWorkerMessage + pool.wg.Wait() // ! // dbHashPool locked diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index 5dde061..9d6beea 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -88,9 +88,9 @@ func TestPoolAddGetConcurrent(t *testing.T) { var wg sync.WaitGroup sleepDuration := 100 * time.Millisecond - // add test databases first to initialize hash pool - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) + // initialize hash pool + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) // add DB in one goroutine wg.Add(1) @@ -102,7 +102,7 @@ func TestPoolAddGetConcurrent(t *testing.T) { sleepDuration := sleepDuration // add DBs sequentially - for i := 0; i < maxPoolSize-1; i++ { + for i := 0; i < maxPoolSize; i++ { assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) time.Sleep(sleepDuration) From e5da1714e226d507a17c0a683670236d85f4fdfd Mon Sep 17 00:00:00 2001 
From: anjankow Date: Thu, 22 Jun 2023 12:10:32 +0000 Subject: [PATCH 048/160] check if manager ready before using db --- pkg/manager/manager.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 4b9a401..853825f 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -108,7 +108,7 @@ func (m *Manager) Disconnect(ctx context.Context, ignoreCloseError bool) error { // we didn't manage to stop on time background routines // but we will continue and close the DB connection // TODO anna: error handling - // fmt.Println("integresql: timeout when stopping background tasks") + fmt.Println("integresql: timeout when stopping background tasks") } if err := m.db.Close(); err != nil && !ignoreCloseError { @@ -264,7 +264,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db // Init a pool with this hash initDBFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, template.Config.Database) + return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) } m.pool.InitHashPool(ctx, template.Database, initDBFunc) @@ -394,6 +394,10 @@ func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { } func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owner string, template string) error { + if !m.Ready() { + return ErrManagerNotReady + } + if err := m.dropDatabase(ctx, dbName); err != nil { return err } @@ -410,7 +414,7 @@ func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template * } initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, template.Config.Database) + return 
m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) } return m.pool.AddTestDatabase(ctx, template.Database, initFunc) From d1b598657fb531d192a13abe979d3f2cbfe54708 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 13:17:48 +0000 Subject: [PATCH 049/160] discard template after test --- pkg/manager/manager_test.go | 6 ++++++ pkg/pool/pool.go | 3 +-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 5deec0e..224331f 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -641,6 +641,9 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { // return testDB after usage assert.NoError(t, m.ReturnTestDatabase(ctx, hash, test.ID)) } + + // discard the template + assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) } func TestManagerGetTestDatabaseExtendingPool(t *testing.T) { @@ -692,6 +695,9 @@ func TestManagerGetTestDatabaseExtendingPool(t *testing.T) { // should not be able to extend beyond the limit _, err = m.GetTestDatabase(ctx, hash) assert.Error(t, err) + + // discard the template + assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) } func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 062967b..cf43304 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -14,7 +14,7 @@ var ( ErrUnknownHash = errors.New("no database pool exists for this hash") ErrPoolFull = errors.New("database pool is full") ErrInvalidState = errors.New("database state is not valid for this operation") - ErrInvalidIndex = errors.New("invalid db.Database index (id)") + ErrInvalidIndex = errors.New("invalid database index (id)") ErrTimeout = errors.New("timeout when waiting for ready db") ) @@ -415,7 +415,6 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error // close all only if removal of all succeeded pool.dbs = nil 
close(pool.dirty) - close(pool.ready) return nil // dbHashPool unlocked From f8e2fdc9409dc7ea6489bcb26d45a6f8d204e254 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 22 Jun 2023 13:57:01 +0000 Subject: [PATCH 050/160] allow to return db not existing in pool --- internal/api/templates/templates.go | 3 +- pkg/manager/manager.go | 54 ++++++++++++++++++++++++----- pkg/manager/manager_test.go | 13 ++++--- pkg/pool/pool.go | 10 ++++-- 4 files changed, 62 insertions(+), 18 deletions(-) diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 3f81548..d4c4c2a 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -8,7 +8,6 @@ import ( "github.com/allaboutapps/integresql/internal/api" "github.com/allaboutapps/integresql/pkg/manager" - "github.com/allaboutapps/integresql/pkg/pool" "github.com/labstack/echo/v4" ) @@ -133,7 +132,7 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return echo.ErrServiceUnavailable case manager.ErrTemplateNotFound: return echo.NewHTTPError(http.StatusNotFound, "template not found") - case pool.ErrUnknownHash: + case manager.ErrTestNotFound: return echo.NewHTTPError(http.StatusNotFound, "test database not found") default: return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 853825f..26502d6 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -19,8 +19,9 @@ var ( ErrManagerNotReady = errors.New("manager not ready") ErrTemplateAlreadyInitialized = errors.New("template is already initialized") ErrTemplateNotFound = errors.New("template not found") - ErrInvalidTemplateState = errors.New("unexpected template state") + ErrTestNotFound = errors.New("test database not found") ErrTemplateDiscarded = errors.New("template is discarded, can't be used") + ErrInvalidTemplateState = errors.New("unexpected template state") ) type Manager struct { @@ 
-298,6 +299,18 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData if errors.Is(err, pool.ErrTimeout) { // on timeout we can try to extend the pool testDB, err = m.pool.ExtendPool(ctx, template.Database) + } else if errors.Is(err, pool.ErrUnknownHash) { + // the pool has been removed, it needs to be reinitialized + initDBFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) + } + m.pool.InitHashPool(ctx, template.Database, initDBFunc) + + // pool initalized, create one test db + testDB, err = m.pool.ExtendPool(ctx, template.Database) + // // and add new test DBs in the background + // m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) + } if err != nil { @@ -314,18 +327,36 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e // check if the template exists and is 'ready' template, found := m.templates.Get(ctx, hash) - if !found { - return ErrTemplateNotFound + if found { + if template.WaitUntilFinalized(ctx, m.config.TestDatabaseWaitTimeout) != + templates.TemplateStateFinalized { + + return ErrInvalidTemplateState + } + + // template is ready, we can return the testDB to the pool + if err := m.pool.ReturnTestDatabase(ctx, hash, id); err != nil { + if errors.Is(err, pool.ErrInvalidIndex) || + errors.Is(err, pool.ErrUnknownHash) { + // simply drop this database below + } else { + // other error is an internal error + return err + } + } } - if template.WaitUntilFinalized(ctx, m.config.TestDatabaseWaitTimeout) != - templates.TemplateStateFinalized { + dbName := pool.MakeDBName(m.makeTemplateDatabaseName(hash), id) + exists, err := m.checkDatabaseExists(ctx, dbName) + if err != nil { + return err + } - return ErrInvalidTemplateState + if !exists { + return ErrTestNotFound } - // template is ready, we can return the testDB to the 
pool - return m.pool.ReturnTestDatabase(ctx, hash, id) + return m.dropDatabase(ctx, dbName) } func (m *Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) error { @@ -337,7 +368,12 @@ func (m *Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) er return m.dropDatabase(ctx, testDB.Config.Database) } - return m.pool.RemoveAllWithHash(ctx, hash, removeFunc) + err := m.pool.RemoveAllWithHash(ctx, hash, removeFunc) + if errors.Is(err, pool.ErrUnknownHash) { + return ErrTemplateNotFound + } + + return err } func (m *Manager) ResetAllTracking(ctx context.Context) error { diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 224331f..8cd6b6a 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -816,8 +816,6 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { } func TestManagerReturnUnknownTemplateDatabase(t *testing.T) { - t.Skip("disabled: outside of pool functionality") - ctx := context.Background() m := testManagerFromEnv() @@ -866,25 +864,32 @@ func TestManagerMultiFinalize(t *testing.T) { populateTemplateDB(t, template) + var wg sync.WaitGroup + wg.Add(3) go func() { + defer wg.Done() t := t if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { t.Fatalf("failed to finalize template database: %v", err) } }() go func() { + defer wg.Done() t := t if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { t.Fatalf("failed to finalize template database: %v", err) } }() go func() { + defer wg.Done() t := t if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { t.Fatalf("failed to finalize template database: %v", err) } }() + wg.Wait() + } func TestManagerClearTrackedTestDatabases(t *testing.T) { @@ -921,9 +926,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { if err := m.ClearTrackedTestDatabases(ctx, hash); err != nil { t.Fatalf("failed to clear tracked test databases: %v", err) } - if err := m.ClearTrackedTestDatabases(ctx, hash); err 
!= nil { - t.Fatalf("failed to clear tracked test databases: %v", err) - } + assert.ErrorIs(t, m.ClearTrackedTestDatabases(ctx, hash), manager.ErrTemplateNotFound) test, err = m.GetTestDatabase(ctx, hash) if err != nil { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index cf43304..8996312 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -366,9 +366,10 @@ func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { }, ID: index, } - // db name has an ID in suffix + + // set DB name templateName := pool.templateDB.Config.Database - dbName := fmt.Sprintf("%s_%03d", templateName, index) + dbName := MakeDBName(templateName, index) newTestDB.Database.Config.Database = dbName if err := pool.recreateDB(ctx, newTestDB, templateName); err != nil { @@ -420,3 +421,8 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error // dbHashPool unlocked // ! } + +func MakeDBName(templateName string, id int) string { + // db name has an ID in suffix + return fmt.Sprintf("%s_%03d", templateName, id) +} From 74c26faa46949c9c2348cd3ec471eba2aaa06c3c Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 23 Jun 2023 07:43:30 +0200 Subject: [PATCH 051/160] set correct state inside extend func --- pkg/pool/pool.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 8996312..96f92ff 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -160,7 +160,7 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in // DBPool unlocked // ! - newTestDB, err := pool.extend(ctx) + newTestDB, err := pool.extend(ctx, dbStateReady) if err != nil { return err } @@ -189,16 +189,12 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // DBPool unlocked // ! 
- newTestDB, err := pool.extend(ctx) + // because we return it right away, we treat it as 'inUse' + newTestDB, err := pool.extend(ctx, dbStateInUse) if err != nil { return db.TestDatabase{}, err } - // because we return it right away, we treat it as 'inUse' - pool.Lock() - pool.dbs[newTestDB.ID].state = dbStateInUse - pool.Unlock() - return newTestDB, nil } @@ -346,7 +342,7 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { } } -func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { +func (pool *dbHashPool) extend(ctx context.Context, state dbState) (db.TestDatabase, error) { // ! // dbHashPool locked pool.Lock() @@ -377,7 +373,7 @@ func (pool *dbHashPool) extend(ctx context.Context) (db.TestDatabase, error) { } // add new test DB to the pool - pool.dbs = append(pool.dbs, existingDB{state: dbStateReady, TestDatabase: newTestDB}) + pool.dbs = append(pool.dbs, existingDB{state: state, TestDatabase: newTestDB}) return newTestDB, nil // dbHashPool unlocked From 2dabe15570757d14a08112b13ec6e184513afe46 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 07:06:16 +0000 Subject: [PATCH 052/160] fix test db name --- pkg/manager/manager.go | 17 +++++++++++++++-- pkg/manager/manager_test.go | 30 +++++++++++++++++++----------- pkg/pool/pool.go | 30 ++++++++++++++++++++---------- pkg/pool/pool_test.go | 12 ++++++------ 4 files changed, 60 insertions(+), 29 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 26502d6..bb44830 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -37,12 +37,21 @@ type Manager struct { } func New(config ManagerConfig) (*Manager, ManagerConfig) { + + var testDBPrefix string + if config.DatabasePrefix != "" { + testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.DatabasePrefix) + } + if config.TestDatabasePrefix != "" { + testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.TestDatabasePrefix) + } + m := &Manager{ config: config, db: nil, wg: sync.WaitGroup{}, 
templates: templates.NewCollection(), - pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize), + pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize, testDBPrefix), connectionCtx: context.TODO(), } @@ -346,7 +355,7 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e } } - dbName := pool.MakeDBName(m.makeTemplateDatabaseName(hash), id) + dbName := m.pool.MakeDBName(hash, id) exists, err := m.checkDatabaseExists(ctx, dbName) if err != nil { return err @@ -478,3 +487,7 @@ func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Templa func (m *Manager) makeTemplateDatabaseName(hash string) string { return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) } + +func (m *Manager) makeTestDatabaseName(hash string) string { + return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TestDatabasePrefix, hash) +} diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 8cd6b6a..d236916 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -3,6 +3,7 @@ package manager_test import ( "context" "database/sql" + "errors" "fmt" "sync" "testing" @@ -866,30 +867,37 @@ func TestManagerMultiFinalize(t *testing.T) { var wg sync.WaitGroup wg.Add(3) - go func() { - defer wg.Done() + + errChan := make(chan error, 3) + finalize := func(errChan chan<- error) { t := t - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + _, err := m.FinalizeTemplateDatabase(ctx, hash) + if errors.Is(err, manager.ErrTemplateAlreadyInitialized) { + errChan <- err + return + } + if err != nil { t.Fatalf("failed to finalize template database: %v", err) } + } + go func() { + defer wg.Done() + finalize(errChan) }() go func() { defer wg.Done() - t := t - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } + finalize(errChan) }() go func() { defer wg.Done() - t := t - if _, err := 
m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } + finalize(errChan) }() wg.Wait() + errCount := len(errChan) + assert.Equal(t, 2, errCount) + } func TestManagerClearTrackedTestDatabases(t *testing.T) { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 96f92ff..8a6dadc 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -33,13 +33,16 @@ type DBPool struct { mutex sync.RWMutex maxPoolSize int + + dbNamePrefix string } -func NewDBPool(maxPoolSize int) *DBPool { +func NewDBPool(maxPoolSize int, testDBNamePrefix string) *DBPool { return &DBPool{ pools: make(map[string]*dbHashPool), - maxPoolSize: maxPoolSize, + maxPoolSize: maxPoolSize, + dbNamePrefix: testDBNamePrefix, } } @@ -160,7 +163,7 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in // DBPool unlocked // ! - newTestDB, err := pool.extend(ctx, dbStateReady) + newTestDB, err := pool.extend(ctx, dbStateReady, p.dbNamePrefix) if err != nil { return err } @@ -190,7 +193,7 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // ! // because we return it right away, we treat it as 'inUse' - newTestDB, err := pool.extend(ctx, dbStateInUse) + newTestDB, err := pool.extend(ctx, dbStateInUse, p.dbNamePrefix) if err != nil { return db.TestDatabase{}, err } @@ -342,7 +345,7 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { } } -func (pool *dbHashPool) extend(ctx context.Context, state dbState) (db.TestDatabase, error) { +func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string) (db.TestDatabase, error) { // ! 
// dbHashPool locked pool.Lock() @@ -364,11 +367,11 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState) (db.TestDatab } // set DB name - templateName := pool.templateDB.Config.Database - dbName := MakeDBName(templateName, index) + dbName := makeDBName(testDBPrefix, pool.templateDB.TemplateHash, index) newTestDB.Database.Config.Database = dbName - if err := pool.recreateDB(ctx, newTestDB, templateName); err != nil { + templateDB := pool.templateDB.Config.Database + if err := pool.recreateDB(ctx, newTestDB, templateDB); err != nil { return db.TestDatabase{}, err } @@ -418,7 +421,14 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error // ! } -func MakeDBName(templateName string, id int) string { +func (p *DBPool) MakeDBName(hash string, id int) string { + p.mutex.RLock() + p.mutex.RUnlock() + + return makeDBName(p.dbNamePrefix, hash, id) +} + +func makeDBName(testDBPrefix string, hash string, id int) string { // db name has an ID in suffix - return fmt.Sprintf("%s_%03d", templateName, id) + return fmt.Sprintf("%s%s_%03d", testDBPrefix, hash, id) } diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index 9d6beea..638735f 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -15,7 +15,7 @@ func TestPoolAddGet(t *testing.T) { t.Parallel() ctx := context.Background() - p := pool.NewDBPool(2) + p := pool.NewDBPool(2, "prefix_") hash1 := "h1" hash2 := "h2" @@ -23,7 +23,7 @@ func TestPoolAddGet(t *testing.T) { TemplateHash: hash1, Config: db.DatabaseConfig{ Username: "ich", - Database: "template_name", + Database: "templateDBname", }, } initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { @@ -40,7 +40,7 @@ func TestPoolAddGet(t *testing.T) { // get it testDB, err := p.GetTestDatabase(ctx, hash1, 0) assert.NoError(t, err) - assert.Equal(t, "template_name_000", testDB.Database.Config.Database) + assert.Equal(t, "prefix_h1_000", testDB.Database.Config.Database) 
assert.Equal(t, "ich", testDB.Database.Config.Username) // add for h2 @@ -83,7 +83,7 @@ func TestPoolAddGetConcurrent(t *testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize) + p := pool.NewDBPool(maxPoolSize, "") var wg sync.WaitGroup sleepDuration := 100 * time.Millisecond @@ -149,7 +149,7 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize) + p := pool.NewDBPool(maxPoolSize, "") var wg sync.WaitGroup @@ -203,7 +203,7 @@ func TestPoolRemoveAll(t *testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize) + p := pool.NewDBPool(maxPoolSize, "") // add DBs sequentially for i := 0; i < maxPoolSize; i++ { From a3ff1c30876852728652ebed3f37269bec1b7bb8 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 07:13:56 +0000 Subject: [PATCH 053/160] lock template when finalizing --- pkg/manager/manager.go | 5 +++-- pkg/templates/template.go | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index bb44830..a967000 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -260,7 +260,8 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db return db.TemplateDatabase{}, ErrTemplateNotFound } - state := template.GetState(ctx) + state, lockedTemplate := template.GetStateWithLock(ctx) + defer lockedTemplate.Unlock() // early bailout if we are already ready (multiple calls) if state == templates.TemplateStateFinalized { @@ -278,7 +279,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db } m.pool.InitHashPool(ctx, template.Database, initDBFunc) - template.SetState(ctx, templates.TemplateStateFinalized) + lockedTemplate.SetState(ctx, templates.TemplateStateFinalized) m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) return db.TemplateDatabase{Database: template.Database}, nil diff --git 
a/pkg/templates/template.go b/pkg/templates/template.go index 076b737..afe25b3 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -73,3 +73,26 @@ func (t *Template) WaitUntilFinalized(ctx context.Context, timeout time.Duration } return newState } + +func (t *Template) GetStateWithLock(ctx context.Context) (TemplateState, lockedTemplate) { + t.mutex.Lock() + + return t.state, lockedTemplate{t: t} +} + +type lockedTemplate struct { + t *Template +} + +func (l lockedTemplate) Unlock() { + l.t.mutex.Unlock() +} + +func (l lockedTemplate) SetState(ctx context.Context, newState TemplateState) { + if l.t.state == newState { + return + } + + l.t.state = newState + l.t.cond.Broadcast() +} From d7f185ffab35c4e50ae8b03667dd4c9db4596187 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 07:19:45 +0000 Subject: [PATCH 054/160] remove richgo from Makefile --- Makefile | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index aa3b0ea..ed68c88 100644 --- a/Makefile +++ b/Makefile @@ -4,19 +4,28 @@ build: format gobuild lint format: go fmt -gobuild: +gobuild: go build -o bin/integresql ./cmd/server lint: golangci-lint run --fast -# https://github.com/golang/go/issues/24573 -# w/o cache - see "go help testflag" -# use https://github.com/kyoh86/richgo to color +# https://github.com/gotestyourself/gotestsum#format +# w/o cache https://github.com/golang/go/issues/24573 - see "go help testflag" # note that these tests should not run verbose by default (e.g. use your IDE for this) # TODO: add test shuffling/seeding when landed in go v1.15 (https://github.com/golang/go/issues/28592) -test: - richgo test -cover -race -count=1 ./... +# tests by pkgname +test: ##- Run tests, output by package, print coverage. + @$(MAKE) go-test-by-pkg + @$(MAKE) go-test-print-coverage + +# note that we explicitly don't want to use a -coverpkg=./... 
option, per pkg coverage take precedence +go-test-by-pkg: ##- (opt) Run tests, output by package. + gotestsum --format pkgname-and-test-fails --jsonfile /tmp/test.log -- -race -cover -count=1 -coverprofile=/tmp/coverage.out ./... + +go-test-print-coverage: ##- (opt) Print overall test coverage (must be done after running tests). + @printf "coverage " + @go tool cover -func=/tmp/coverage.out | tail -n 1 | awk '{$$1=$$1;print}' init: modules tools tidy @go version From 9abaceaf5c49f8c0252118274f7e9cf54b105d3b Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 07:57:19 +0000 Subject: [PATCH 055/160] launch config: build before launching --- .vscode/launch.json | 3 ++- .vscode/tasks.json | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 27dace7..b80d00a 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -11,7 +11,8 @@ "mode": "auto", "program": "${workspaceFolder}/cmd/server", "env": {}, - "args": [] + "args": [], + "preLaunchTask": "build" } ] } \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json index a58e5bf..c7c1385 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -11,6 +11,14 @@ "kind": "build", "isDefault": true } + }, + { + "label": "build", + "type": "shell", + "command": "make build", + "group": { + "kind": "build" + } } ] } \ No newline at end of file From 864ea4a9f20d38cfa5060b510648d0f60552a7c4 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 07:58:48 +0000 Subject: [PATCH 056/160] return ErrTestDBInUse when deleting --- internal/api/templates/templates.go | 2 ++ pkg/manager/manager.go | 10 ++++++---- tests/integresql_test.go | 2 ++ 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index d4c4c2a..7483cee 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -134,6 +134,8 @@ func 
deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusNotFound, "template not found") case manager.ErrTestNotFound: return echo.NewHTTPError(http.StatusNotFound, "test database not found") + case manager.ErrTestDBInUse: + return echo.NewHTTPError(http.StatusLocked, manager.ErrTestDBInUse.Error()) default: return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index a967000..186f1e5 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "runtime/trace" + "strings" "sync" "github.com/allaboutapps/integresql/pkg/db" @@ -22,6 +23,7 @@ var ( ErrTestNotFound = errors.New("test database not found") ErrTemplateDiscarded = errors.New("template is discarded, can't be used") ErrInvalidTemplateState = errors.New("unexpected template state") + ErrTestDBInUse = errors.New("test database is in use, close the connection before dropping") ) type Manager struct { @@ -433,6 +435,10 @@ func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { defer trace.StartRegion(ctx, "drop_db").End() if _, err := m.db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil { + if strings.Contains(err.Error(), "is being accessed by other users") { + return ErrTestDBInUse + } + return err } @@ -488,7 +494,3 @@ func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Templa func (m *Manager) makeTemplateDatabaseName(hash string) string { return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) } - -func (m *Manager) makeTestDatabaseName(hash string) string { - return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TestDatabasePrefix, hash) -} diff --git a/tests/integresql_test.go b/tests/integresql_test.go index 2b20446..70bb762 100644 --- a/tests/integresql_test.go +++ b/tests/integresql_test.go @@ -57,6 
+57,7 @@ func BenchmarkGetDatabaseFromNewTemplate(b *testing.B) { var userCnt int require.NoError(b, row.Scan(&userCnt)) assert.Equal(b, 2, userCnt) + db.Close() require.NoError(b, client.ReturnTestDatabase(ctx, newTemplateHash, dbConfig.ID)) require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) @@ -106,6 +107,7 @@ func BenchmarkGetDatabaseFromExistingTemplate(b *testing.B) { var userCnt int require.NoError(b, row.Scan(&userCnt)) assert.Equal(b, 1, userCnt) + db.Close() require.NoError(b, client.ReturnTestDatabase(ctx, newTemplateHash, dbConfig.ID)) } From 558c1b242c9855f96d15c0f1ad7bc1a92bd90f1b Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 08:44:30 +0000 Subject: [PATCH 057/160] add bench to Makefile --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index ed68c88..94dd576 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,9 @@ gobuild: lint: golangci-lint run --fast +bench: ##- Run tests, output by package, print coverage. + @go test -benchmem=false -run=./... -bench . github.com/allaboutapps/integresql/tests -race -count=4 -v + # https://github.com/gotestyourself/gotestsum#format # w/o cache https://github.com/golang/go/issues/24573 - see "go help testflag" # note that these tests should not run verbose by default (e.g. 
use your IDE for this) From 099e8d1635df3ce564fb78ec739e5746ea2f9acd Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 09:03:02 +0000 Subject: [PATCH 058/160] fix returning test database --- pkg/manager/manager.go | 20 ++++++++++++-------- tests/integresql_test.go | 3 ++- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 186f1e5..097614b 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -347,15 +347,19 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e } // template is ready, we can return the testDB to the pool - if err := m.pool.ReturnTestDatabase(ctx, hash, id); err != nil { - if errors.Is(err, pool.ErrInvalidIndex) || - errors.Is(err, pool.ErrUnknownHash) { - // simply drop this database below - } else { - // other error is an internal error - return err - } + err := m.pool.ReturnTestDatabase(ctx, hash, id) + if err == nil { + return nil } + + if !(errors.Is(err, pool.ErrInvalidIndex) || + errors.Is(err, pool.ErrUnknownHash)) { + // other error is an internal error + return err + } + + // db is not tracked in the pool + // try to drop it if exists below } dbName := m.pool.MakeDBName(hash, id) diff --git a/tests/integresql_test.go b/tests/integresql_test.go index 70bb762..b687178 100644 --- a/tests/integresql_test.go +++ b/tests/integresql_test.go @@ -59,7 +59,6 @@ func BenchmarkGetDatabaseFromNewTemplate(b *testing.B) { assert.Equal(b, 2, userCnt) db.Close() - require.NoError(b, client.ReturnTestDatabase(ctx, newTemplateHash, dbConfig.ID)) require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) } }) @@ -107,6 +106,8 @@ func BenchmarkGetDatabaseFromExistingTemplate(b *testing.B) { var userCnt int require.NoError(b, row.Scan(&userCnt)) assert.Equal(b, 1, userCnt) + // keep the DB for some time before returning + time.Sleep(time.Second) db.Close() require.NoError(b, client.ReturnTestDatabase(ctx, newTemplateHash, dbConfig.ID)) 
From c30a1f0c174b5780aea49f6fea52011ad5486c7c Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 11:32:33 +0000 Subject: [PATCH 059/160] split template finalize and db get timeout --- pkg/manager/manager.go | 8 ++++---- pkg/manager/manager_config.go | 6 ++++-- pkg/manager/manager_test.go | 15 +++++++++++---- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 097614b..0423476 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -302,12 +302,12 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData // if the template has been discarded/not initalized yet, // no DB should be returned, even if already in the pool - state := template.WaitUntilFinalized(ctx, m.config.TestDatabaseWaitTimeout) + state := template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) if state != templates.TemplateStateFinalized { return db.TestDatabase{}, ErrInvalidTemplateState } - testDB, err := m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseWaitTimeout) + testDB, err := m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseGetTimeout) if errors.Is(err, pool.ErrTimeout) { // on timeout we can try to extend the pool testDB, err = m.pool.ExtendPool(ctx, template.Database) @@ -340,7 +340,7 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e // check if the template exists and is 'ready' template, found := m.templates.Get(ctx, hash) if found { - if template.WaitUntilFinalized(ctx, m.config.TestDatabaseWaitTimeout) != + if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != templates.TemplateStateFinalized { return ErrInvalidTemplateState @@ -464,7 +464,7 @@ func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owne // createTestDatabaseFromTemplate adds a new test database in the pool (increasing its size) basing on the given template. 
// It waits until the template is ready. func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template *templates.Template) error { - if template.WaitUntilFinalized(ctx, m.config.TestDatabaseWaitTimeout) != templates.TemplateStateFinalized { + if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != templates.TemplateStateFinalized { // if the state changed in the meantime, return return ErrInvalidTemplateState } diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index fc47993..538c5d5 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -18,7 +18,8 @@ type ManagerConfig struct { TestDatabaseOwnerPassword string TestDatabaseInitialPoolSize int TestDatabaseMaxPoolSize int - TestDatabaseWaitTimeout time.Duration + TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state + TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool } func DefaultManagerConfigFromEnv() ManagerConfig { @@ -55,6 +56,7 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), - TestDatabaseWaitTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_WAIT_TIMEOUT_MS", 1000)), + TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 100)), + TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), } } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index d236916..f26b74e 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ 
-307,7 +307,7 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseWaitTimeout = 10 * time.Nanosecond + cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond // no db created initally in the background cfg.TestDatabaseInitialPoolSize = 0 m, _ := testManagerWithConfig(cfg) @@ -340,7 +340,10 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TemplateFinalizeTimeout = 1 * time.Second + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -655,7 +658,7 @@ func TestManagerGetTestDatabaseExtendingPool(t *testing.T) { cfg.TestDatabaseInitialPoolSize = 1 // should extend up to 10 on demand cfg.TestDatabaseMaxPoolSize = 10 - cfg.TestDatabaseWaitTimeout = 10 * time.Nanosecond + cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -903,7 +906,11 @@ func TestManagerMultiFinalize(t *testing.T) { func TestManagerClearTrackedTestDatabases(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + // there are no db added in background + cfg.TestDatabaseInitialPoolSize = 0 + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } From cf2611540b5da7b1ae6857f3c320df2cb8fbe3db Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 12:08:21 +0000 Subject: [PATCH 060/160] remap postgres to 5434 --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 841a743..75c3afd 100644 --- a/docker-compose.yml +++ 
b/docker-compose.yml @@ -36,7 +36,7 @@ services: expose: - "5432" ports: - - "5432:5432" + - "5434:5432" environment: POSTGRES_DB: *PSQL_DBNAME POSTGRES_USER: *PSQL_USER From ce85829732aeee3f63701c18d765e4d7d1e85027 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 12:14:14 +0000 Subject: [PATCH 061/160] ignore error TemplateAlreadyInitialized --- internal/api/templates/templates.go | 3 +++ tests/integresql_test.go | 10 ---------- tests/testclient/client.go | 3 +-- 3 files changed, 4 insertions(+), 12 deletions(-) diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 7483cee..b143a95 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -55,6 +55,9 @@ func putFinalizeTemplate(s *api.Server) echo.HandlerFunc { if _, err := s.Manager.FinalizeTemplateDatabase(ctx, hash); err != nil { switch err { + case manager.ErrTemplateAlreadyInitialized: + // template is initialized, we ignore this error + return c.NoContent(http.StatusNoContent) case manager.ErrManagerNotReady: return echo.ErrServiceUnavailable case manager.ErrTemplateNotFound: diff --git a/tests/integresql_test.go b/tests/integresql_test.go index b687178..8fd5ddb 100644 --- a/tests/integresql_test.go +++ b/tests/integresql_test.go @@ -5,7 +5,6 @@ package integresql_test import ( "context" "database/sql" - "errors" "testing" "time" @@ -116,12 +115,3 @@ func BenchmarkGetDatabaseFromExistingTemplate(b *testing.B) { b.Cleanup(func() { require.NoError(b, client.DiscardTemplate(ctx, newTemplateHash)) }) } - -// nolint: deadcode -func ignoreError(toIgnore error, err error) error { - if errors.Is(err, toIgnore) { - return nil - } - - return err -} diff --git a/tests/testclient/client.go b/tests/testclient/client.go index dd80338..686c204 100644 --- a/tests/testclient/client.go +++ b/tests/testclient/client.go @@ -15,7 +15,6 @@ import ( "path" "github.com/allaboutapps/integresql/pkg/manager" - 
"github.com/allaboutapps/integresql/pkg/pool" "github.com/allaboutapps/integresql/pkg/util" _ "github.com/lib/pq" ) @@ -220,7 +219,7 @@ func (c *Client) GetTestDatabase(ctx context.Context, hash string) (TestDatabase case http.StatusNotFound: return test, manager.ErrTemplateNotFound case http.StatusGone: - return test, pool.ErrInvalidIndex + return test, manager.ErrTestNotFound case http.StatusServiceUnavailable: return test, manager.ErrManagerNotReady default: From aa9b9490e6b9cc83e061c1addd2af0016275db1b Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 27 Jun 2023 15:05:26 +0200 Subject: [PATCH 062/160] increase timeout for template finalization --- pkg/manager/manager_config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 538c5d5..980c3a6 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -56,7 +56,7 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), - TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 100)), + TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 2000)), TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), } } From 6d07aa1349b0d53007478ca77d4ecd34a5f4d525 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 28 Jun 2023 09:30:01 +0200 Subject: [PATCH 063/160] return http.StatusInsufficientStorage when pool is full --- internal/api/templates/templates.go | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/internal/api/templates/templates.go b/internal/api/templates/templates.go index b143a95..cf4fc96 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -8,6 +8,7 @@ import ( "github.com/allaboutapps/integresql/internal/api" "github.com/allaboutapps/integresql/pkg/manager" + "github.com/allaboutapps/integresql/pkg/pool" "github.com/labstack/echo/v4" ) @@ -109,6 +110,8 @@ func getTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusNotFound, "template not found") case manager.ErrTemplateDiscarded: return echo.NewHTTPError(http.StatusGone, "template was just discarded") + case pool.ErrPoolFull: + return echo.NewHTTPError(http.StatusInsufficientStorage, "pool is full and can't be extended") default: return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } From d1d45c2578d46f8de26b6154f886d6ae87f9b8c3 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 28 Jun 2023 08:24:56 +0000 Subject: [PATCH 064/160] add more trace info --- pkg/manager/manager.go | 9 +++++++++ pkg/pool/pool.go | 24 ++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 0423476..51dd9f6 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -307,10 +307,16 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return db.TestDatabase{}, ErrInvalidTemplateState } + ctx, task = trace.NewTask(ctx, "get_with_timeout") testDB, err := m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseGetTimeout) + task.End() + if errors.Is(err, pool.ErrTimeout) { // on timeout we can try to extend the pool + ctx, task := trace.NewTask(ctx, "extend_pool_on_demand") testDB, err = m.pool.ExtendPool(ctx, template.Database) + task.End() + } else if errors.Is(err, pool.ErrUnknownHash) { // the pool has been removed, it needs to be reinitialized initDBFunc := func(ctx context.Context, testDB 
db.TestDatabase, templateName string) error { @@ -333,6 +339,9 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData } func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error { + ctx, task := trace.NewTask(ctx, "return_test_db") + defer task.End() + if !m.Ready() { return ErrManagerNotReady } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 8a6dadc..82e8cee 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "runtime/trace" "sync" "time" @@ -99,7 +100,9 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. // ! // DBPool locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") p.mutex.RLock() + reg.End() pool := p.pools[hash] if pool == nil { @@ -123,8 +126,10 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. // ! // dbHashPool locked + reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() + reg.End() // sanity check, should never happen if index < 0 || index >= len(pool.dbs) { @@ -152,7 +157,9 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in // ! // DBPool locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") p.mutex.Lock() + reg.End() pool := p.pools[hash] if pool == nil { @@ -179,7 +186,9 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // ! // DBPool locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") p.mutex.Lock() + reg.End() pool := p.pools[hash] if pool == nil { @@ -205,7 +214,9 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // ! // DBPool locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") p.mutex.Lock() + reg.End() pool := p.pools[hash] if pool == nil { @@ -216,8 +227,10 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // ! 
// dbHashPool locked + reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() + reg.End() p.mutex.Unlock() // DBPool unlocked @@ -317,7 +330,10 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { break } + reg := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") pool.RLock() + reg.End() + if dirtyID < 0 || dirtyID >= len(pool.dbs) { // sanity check, should never happen pool.RUnlock() @@ -330,26 +346,34 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { continue } + reg = trace.StartRegion(ctx, "worker_cleanup") if err := pool.recreateDB(ctx, testDB.TestDatabase, templateName); err != nil { // TODO anna: error handling fmt.Printf("integresql: failed to clean up DB: %v\n", err) continue } + reg = trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") pool.Lock() + reg.End() + testDB.state = dbStateReady pool.dbs[dirtyID] = testDB + pool.Unlock() pool.ready <- testDB.ID + reg.End() } } func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string) (db.TestDatabase, error) { // ! 
// dbHashPool locked + reg := trace.StartRegion(ctx, "extend_wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() + reg.End() // get index of a next test DB - its ID index := len(pool.dbs) From 3ac3fddfe19152315660b44b268a1869cab5615e Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 28 Jun 2023 08:48:59 +0000 Subject: [PATCH 065/160] add adjustable number of cleaning workers --- pkg/manager/manager.go | 2 +- pkg/manager/manager_config.go | 2 + pkg/pool/pool.go | 45 +++++++++++++--------- pkg/pool/pool_test.go | 72 +++++++++++++++++++++++++++++++++-- 4 files changed, 97 insertions(+), 24 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 51dd9f6..581ae50 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -53,7 +53,7 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { db: nil, wg: sync.WaitGroup{}, templates: templates.NewCollection(), - pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize, testDBPrefix), + pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize, testDBPrefix, config.NumOfCleaningWorkers), connectionCtx: context.TODO(), } diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 980c3a6..f66468d 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -20,6 +20,7 @@ type ManagerConfig struct { TestDatabaseMaxPoolSize int TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool + NumOfCleaningWorkers int // Number of pool workers cleaning up dirty DBs } func DefaultManagerConfigFromEnv() ManagerConfig { @@ -58,5 +59,6 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 2000)), TestDatabaseGetTimeout: 
time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), + NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", 3), } } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 82e8cee..c06c874 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -33,17 +33,18 @@ type DBPool struct { pools map[string]*dbHashPool // map[hash] mutex sync.RWMutex - maxPoolSize int - + maxPoolSize int dbNamePrefix string + numOfWorkers int } -func NewDBPool(maxPoolSize int, testDBNamePrefix string) *DBPool { +func NewDBPool(maxPoolSize int, testDBNamePrefix string, numberOfWorkers int) *DBPool { return &DBPool{ pools: make(map[string]*dbHashPool), maxPoolSize: maxPoolSize, dbNamePrefix: testDBNamePrefix, + numOfWorkers: numberOfWorkers, } } @@ -62,7 +63,8 @@ type dbHashPool struct { recreateDB RecreateDBFunc templateDB db.Database sync.RWMutex - wg sync.WaitGroup + wg sync.WaitGroup + numOfWorkers int } func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { @@ -74,9 +76,9 @@ func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initD func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { // create a new dbHashPool - pool := newDBHashPool(p.maxPoolSize, initDBFunc, templateDB) + pool := newDBHashPool(p.maxPoolSize, initDBFunc, templateDB, p.numOfWorkers) // and start the cleaning worker - pool.enableWorker() + pool.enableWorker(p.numOfWorkers) // pool is ready p.pools[pool.templateDB.TemplateHash] = pool @@ -302,22 +304,25 @@ func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) // ! 
} -func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Database) *dbHashPool { +func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Database, numberOfWorkers int) *dbHashPool { return &dbHashPool{ - dbs: make([]existingDB, 0, maxPoolSize), - ready: make(chan int, maxPoolSize), - dirty: make(chan int, maxPoolSize), - recreateDB: recreateDB, - templateDB: templateDB, + dbs: make([]existingDB, 0, maxPoolSize), + ready: make(chan int, maxPoolSize), + dirty: make(chan int, maxPoolSize), + recreateDB: recreateDB, + templateDB: templateDB, + numOfWorkers: numberOfWorkers, } } -func (pool *dbHashPool) enableWorker() { - pool.wg.Add(1) - go func() { - defer pool.wg.Done() - pool.workerCleanUpDirtyDB() - }() +func (pool *dbHashPool) enableWorker(numberOfWorkers int) { + for i := 0; i < numberOfWorkers; i++ { + pool.wg.Add(1) + go func() { + defer pool.wg.Done() + pool.workerCleanUpDirtyDB() + }() + } } func (pool *dbHashPool) workerCleanUpDirtyDB() { @@ -411,7 +416,9 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error // stop the worker // we don't close here because if the remove operation fails, we want to be able to repeat it - pool.dirty <- stopWorkerMessage + for i := 0; i < pool.numOfWorkers; i++ { + pool.dirty <- stopWorkerMessage + } pool.wg.Wait() // ! 
diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index 638735f..ff3f3f0 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -15,7 +15,7 @@ func TestPoolAddGet(t *testing.T) { t.Parallel() ctx := context.Background() - p := pool.NewDBPool(2, "prefix_") + p := pool.NewDBPool(2, "prefix_", 4) hash1 := "h1" hash2 := "h2" @@ -83,7 +83,7 @@ func TestPoolAddGetConcurrent(t *testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize, "") + p := pool.NewDBPool(maxPoolSize, "", 4) var wg sync.WaitGroup sleepDuration := 100 * time.Millisecond @@ -149,7 +149,7 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize, "") + p := pool.NewDBPool(maxPoolSize, "", 4) var wg sync.WaitGroup @@ -203,7 +203,7 @@ func TestPoolRemoveAll(t *testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize, "") + p := pool.NewDBPool(maxPoolSize, "", 4) // add DBs sequentially for i := 0; i < maxPoolSize; i++ { @@ -228,3 +228,67 @@ func TestPoolRemoveAll(t *testing.T) { p.Stop() } + +func TestPoolInit(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + templateDB1 := db.Database{ + TemplateHash: hash1, + } + + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database) + return nil + } + + maxPoolSize := 100 + numOfWorkers := 150 + p := pool.NewDBPool(maxPoolSize, "", numOfWorkers) + + // we will test 2 ways of adding new DBs + for i := 0; i < maxPoolSize/2; i++ { + // add and get freshly added DB + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) + _, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) + assert.NoError(t, err) + + // extend pool (= add and get) + _, err = p.ExtendPool(ctx, templateDB1) + assert.NoError(t, err) + } + + // there should be no more free DBs + _, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, 10*time.Millisecond) + assert.ErrorIs(t, 
err, pool.ErrTimeout) + + var wg sync.WaitGroup + // now return them all + wg.Add(1) + go func() { + defer wg.Done() + maxPoolSize := maxPoolSize + templateHash := templateDB1.TemplateHash + for i := 0; i < maxPoolSize; i++ { + assert.NoError(t, p.ReturnTestDatabase(ctx, templateHash, i)) + } + }() + + // and check that they can be get again + // = the workers cleaned them up + wg.Add(1) + go func() { + defer wg.Done() + maxPoolSize := maxPoolSize + templateHash := templateDB1.TemplateHash + for i := 0; i < maxPoolSize; i++ { + _, err := p.GetTestDatabase(ctx, templateHash, 10*time.Millisecond) + assert.NoError(t, err) + } + }() + + wg.Wait() + + p.Stop() +} From 0f7faa0b3ee14067819818beceed3e4fde73ae81 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 28 Jun 2023 11:41:27 +0200 Subject: [PATCH 066/160] add worker_cleanup_task --- pkg/pool/pool.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index c06c874..7edd34f 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -327,7 +327,6 @@ func (pool *dbHashPool) enableWorker(numberOfWorkers int) { func (pool *dbHashPool) workerCleanUpDirtyDB() { - ctx := context.Background() templateName := pool.templateDB.Config.Database for dirtyID := range pool.dirty { @@ -335,6 +334,8 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { break } + ctx, task := trace.NewTask(context.Background(), "worker_cleanup_task") + reg := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") pool.RLock() reg.End() @@ -342,12 +343,14 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { if dirtyID < 0 || dirtyID >= len(pool.dbs) { // sanity check, should never happen pool.RUnlock() + task.End() continue } testDB := pool.dbs[dirtyID] pool.RUnlock() if testDB.state != dbStateDirty { + task.End() continue } @@ -355,6 +358,8 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { if err := pool.recreateDB(ctx, testDB.TestDatabase, templateName); err != nil { // TODO anna: error handling 
fmt.Printf("integresql: failed to clean up DB: %v\n", err) + + task.End() continue } @@ -368,7 +373,9 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { pool.Unlock() pool.ready <- testDB.ID + reg.End() + task.End() } } From f3aaf88eb1bfa580bd40141c31c5b35bf2922b17 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 28 Jun 2023 11:58:53 +0200 Subject: [PATCH 067/160] fix worker trace regions --- pkg/pool/pool.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 7edd34f..22f0ec6 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -336,9 +336,9 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { ctx, task := trace.NewTask(context.Background(), "worker_cleanup_task") - reg := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") + regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") pool.RLock() - reg.End() + regLock.End() if dirtyID < 0 || dirtyID >= len(pool.dbs) { // sanity check, should never happen @@ -354,7 +354,7 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { continue } - reg = trace.StartRegion(ctx, "worker_cleanup") + reg := trace.StartRegion(ctx, "worker_cleanup") if err := pool.recreateDB(ctx, testDB.TestDatabase, templateName); err != nil { // TODO anna: error handling fmt.Printf("integresql: failed to clean up DB: %v\n", err) @@ -363,9 +363,9 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { continue } - reg = trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") + regLock = trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") pool.Lock() - reg.End() + regLock.End() testDB.state = dbStateReady pool.dbs[dirtyID] = testDB From a9b2fbfc675f504d2736409bec837cd498d7a207 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 28 Jun 2023 12:51:31 +0200 Subject: [PATCH 068/160] add pool comments --- pkg/manager/manager.go | 12 ++++++++---- pkg/pool/pool.go | 33 +++++++++++++++++++++++++++------ 2 files changed, 35 insertions(+), 10 deletions(-) diff --git 
a/pkg/manager/manager.go b/pkg/manager/manager.go index 581ae50..c953b84 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -34,8 +34,8 @@ type Manager struct { templates *templates.Collection pool *pool.DBPool - connectionCtx context.Context - cancelConnectionCtx func() + connectionCtx context.Context // DB connection context used for adding initial DBs in background + cancelConnectionCtx func() // Cancel function for DB connection context } func New(config ManagerConfig) (*Manager, ManagerConfig) { @@ -287,6 +287,8 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db return db.TemplateDatabase{Database: template.Database}, nil } +// GetTestDatabase tries to get a ready test DB from an existing pool. +// If no DB is ready after the preconfigured timeout, it tries to extend the pool and therefore create a new DB. func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) { ctx, task := trace.NewTask(ctx, "get_test_db") defer task.End() @@ -318,7 +320,9 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData task.End() } else if errors.Is(err, pool.ErrUnknownHash) { - // the pool has been removed, it needs to be reinitialized + // Template exists, but the pool is not there - + // it must have been removed. + // It needs to be reinitialized. initDBFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) } @@ -471,7 +475,7 @@ func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owne } // createTestDatabaseFromTemplate adds a new test database in the pool (increasing its size) basing on the given template. -// It waits until the template is ready. +// It waits until the template is finalized. 
func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template *templates.Template) error { if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != templates.TemplateStateFinalized { // if the state changed in the meantime, return diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 22f0ec6..f241ab4 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -19,12 +19,12 @@ var ( ErrTimeout = errors.New("timeout when waiting for ready db") ) -type dbState int +type dbState int // Indicates a current DB state. const ( - dbStateReady = iota - dbStateInUse = iota - dbStateDirty = iota + dbStateReady dbState = iota // Initialized according to a template and ready to be picked up. + dbStateInUse // Currently in use, can't be reused. + dbStateDirty // Returned to the pool, waiting for the cleaning. ) const stopWorkerMessage int = -1 @@ -35,7 +35,7 @@ type DBPool struct { maxPoolSize int dbNamePrefix string - numOfWorkers int + numOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) } func NewDBPool(maxPoolSize int, testDBNamePrefix string, numberOfWorkers int) *DBPool { @@ -48,6 +48,7 @@ func NewDBPool(maxPoolSize int, testDBNamePrefix string, numberOfWorkers int) *D } } +// RecreateDBFunc callback executed when a pool is extended or the DB cleaned up by a worker. type RecreateDBFunc func(ctx context.Context, testDB db.TestDatabase, templateName string) error type existingDB struct { @@ -55,10 +56,11 @@ type existingDB struct { db.TestDatabase } +// dbHashPool holds a test DB pool for a certain hash. Each dbHashPool is running cleanup workers in background. 
type dbHashPool struct { dbs []existingDB ready chan int // ID of initalized DBs according to a template, ready to pick them up - dirty chan int // ID of returned DBs, need to be initalized again to reuse them + dirty chan int // ID of returned DBs, need to be recreated to reuse them recreateDB RecreateDBFunc templateDB db.Database @@ -67,6 +69,7 @@ type dbHashPool struct { numOfWorkers int } +// InitHashPool creates a new pool with a given template hash and starts the cleanup workers. func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { p.mutex.Lock() defer p.mutex.Unlock() @@ -98,6 +101,9 @@ func (p *DBPool) Stop() { } +// GetTestDatabase picks up a ready to use test DB. It waits the given timeout until a DB is available. +// If there is no DB ready and time elapses, ErrTimeout is returned. +// Otherwise, the obtained test DB is marked as 'InUse' and can be reused only if returned to the pool. func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { // ! @@ -154,6 +160,9 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. // ! } +// AddTestDatabase adds a new test DB to the pool and creates it according to the template. +// The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. +// If the pool size has already reached MAX, ErrPoolFull is returned. func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { hash := templateDB.TemplateHash @@ -183,6 +192,9 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in return nil } +// AddTestDatabase adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. +// The new test DB is marked as 'IsUse' and won't be picked up with GetTestDatabase, until it's returned to the pool. 
+// If the pool size has already reached MAX, ErrPoolFull is returned. func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { hash := templateDB.TemplateHash @@ -212,6 +224,9 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes return newTestDB, nil } +// ReturnTestDatabase is used to return a DB that is currently 'InUse' to the pool. +// After successful return, the test DB is cleaned up in the background by a worker. +// If the test DB is in a different state than 'InUse', ErrInvalidState is returned. func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { // ! @@ -259,6 +274,8 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // ! } +// RemoveAllWithHash removes a pool with a given template hash. +// All background workers belonging to this pool are stopped. func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc func(db.TestDatabase) error) error { // ! @@ -285,6 +302,7 @@ func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc // ! } +// RemoveAll removes all tracked pools. func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) error) error { // ! // DBPool locked @@ -325,6 +343,8 @@ func (pool *dbHashPool) enableWorker(numberOfWorkers int) { } } +// workerCleanUpDirtyDB reads 'dirty' channel and cleans up a test DB with the received index. +// When the DB is recreated according to a template, its index goes to the 'ready' channel. func (pool *dbHashPool) workerCleanUpDirtyDB() { templateName := pool.templateDB.Config.Database @@ -459,6 +479,7 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error // ! } +// MakeDBName makes a test DB name with the configured prefix, template hash and ID of the DB. 
func (p *DBPool) MakeDBName(hash string, id int) string { p.mutex.RLock() p.mutex.RUnlock() From a57ab09d68a6fd0e6f8f5b1ef3ef675e02013ed1 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 28 Jun 2023 12:59:22 +0200 Subject: [PATCH 069/160] add util comments --- pkg/util/wait.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/util/wait.go b/pkg/util/wait.go index b4747ea..bd05a23 100644 --- a/pkg/util/wait.go +++ b/pkg/util/wait.go @@ -10,6 +10,7 @@ import ( var ErrTimeout = errors.New("timeout while waiting for operation to complete") +// WaitWithTimeout waits for the operation to complete of returns the ErrTimeout. func WaitWithTimeout[T any](ctx context.Context, timeout time.Duration, operation func(context.Context) (T, error)) (T, error) { cctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() @@ -32,6 +33,8 @@ func WaitWithTimeout[T any](ctx context.Context, timeout time.Duration, operatio } } +// WaitWithCancellableCtx runs the operation tracking the context state. +// If the given context is cancelled, the function returns directly with ErrTimeout. func WaitWithCancellableCtx[T any](ctx context.Context, operation func(context.Context) (T, error)) (T, error) { resChan := make(chan T, 1) From 2f4eafbe5a459cebd9f2b5e7718f7106d839fc5d Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 28 Jun 2023 13:38:16 +0200 Subject: [PATCH 070/160] add template tests and comments --- pkg/templates/template.go | 16 +++++- pkg/templates/template_collection.go | 9 ++++ pkg/templates/template_collection_test.go | 60 +++++++++++++++++++++++ 3 files changed, 83 insertions(+), 2 deletions(-) create mode 100644 pkg/templates/template_collection_test.go diff --git a/pkg/templates/template.go b/pkg/templates/template.go index afe25b3..277bfb2 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -35,6 +35,7 @@ func NewTemplate(database db.Database) *Template { return t } +// GetState locks the template and checks its state. 
func (t *Template) GetState(ctx context.Context) TemplateState { t.mutex.RLock() defer t.mutex.RUnlock() @@ -42,6 +43,7 @@ func (t *Template) GetState(ctx context.Context) TemplateState { return t.state } +// SetState sets the desired state and broadcasts the change to whoever is waiting for it. func (t *Template) SetState(ctx context.Context, newState TemplateState) { if t.GetState(ctx) == newState { return @@ -54,6 +56,9 @@ func (t *Template) SetState(ctx context.Context, newState TemplateState) { t.cond.Broadcast() } +// WaitUntilFinalized checks the current template state and returns directly if it's 'Finalized'. +// If it's not, the function waits the given timeout until the template state changes. +// On timeout, the old state is returned, otherwise - the new state. func (t *Template) WaitUntilFinalized(ctx context.Context, timeout time.Duration) (exitState TemplateState) { currentState := t.GetState(ctx) if currentState == TemplateStateFinalized { @@ -74,6 +79,8 @@ func (t *Template) WaitUntilFinalized(ctx context.Context, timeout time.Duration return newState } +// GetStateWithLock gets the current state leaving the template locked. +// REMEMBER to unlock it when you no longer need it locked. func (t *Template) GetStateWithLock(ctx context.Context) (TemplateState, lockedTemplate) { t.mutex.Lock() @@ -84,10 +91,15 @@ type lockedTemplate struct { t *Template } -func (l lockedTemplate) Unlock() { - l.t.mutex.Unlock() +// Unlock releases the locked template. +func (l *lockedTemplate) Unlock() { + if l.t != nil { + l.t.mutex.Unlock() + l.t = nil + } } +// SetState sets a new state of the locked template (without acquiring the lock again). 
func (l lockedTemplate) SetState(ctx context.Context, newState TemplateState) { if l.t.state == newState { return diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index fe87a0d..acf7a28 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -13,6 +13,7 @@ type Collection struct { collMutex sync.RWMutex } +// Unlock function used to release the collection lock. type Unlock func() func NewCollection() *Collection { @@ -22,6 +23,10 @@ func NewCollection() *Collection { } } +// Push tries to add a new template to the collection. +// Returns added=false, if the template has been there already. +// In such case, it is not overwritten! To replace a template, first remove it (via Pop) and then Push again. +// This function locks the collection and no matter what is its output, the unlock function needs to be called to release the lock. func (tc *Collection) Push(ctx context.Context, hash string, template db.DatabaseConfig) (added bool, unlock Unlock) { reg := trace.StartRegion(ctx, "get_template_lock") tc.collMutex.Lock() @@ -40,6 +45,7 @@ func (tc *Collection) Push(ctx context.Context, hash string, template db.Databas return true, unlock } +// Pop removes a template from the collection returning it to the caller. func (tc *Collection) Pop(ctx context.Context, hash string) (template *Template, found bool) { reg := trace.StartRegion(ctx, "get_template_lock") defer reg.End() @@ -55,6 +61,7 @@ func (tc *Collection) Pop(ctx context.Context, hash string) (template *Template, return template, true } +// Get gets the requested template without removing it from the collection. 
func (tc *Collection) Get(ctx context.Context, hash string) (template *Template, found bool) { reg := trace.StartRegion(ctx, "get_template_lock") defer reg.End() @@ -70,10 +77,12 @@ func (tc *Collection) Get(ctx context.Context, hash string) (template *Template, return template, true } +// RemoveUnsafe removes the template and can be called ONLY IF THE COLLECTION IS LOCKED. func (tc *Collection) RemoveUnsafe(ctx context.Context, hash string) { delete(tc.templates, hash) } +// RemoveAll removes all templates from the collection. func (tc *Collection) RemoveAll(ctx context.Context) { reg := trace.StartRegion(ctx, "get_template_lock") defer reg.End() diff --git a/pkg/templates/template_collection_test.go b/pkg/templates/template_collection_test.go new file mode 100644 index 0000000..5fbec40 --- /dev/null +++ b/pkg/templates/template_collection_test.go @@ -0,0 +1,60 @@ +package templates_test + +import ( + "context" + "testing" + "time" + + "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/templates" + "github.com/allaboutapps/integresql/pkg/util" + "github.com/stretchr/testify/assert" +) + +func TestTemplateCollection(t *testing.T) { + ctx := context.Background() + + coll := templates.NewCollection() + cfg := db.DatabaseConfig{ + Username: "ich", + Database: "template_test", + } + hash := "123" + + added, unlock := coll.Push(ctx, hash, cfg) + assert.True(t, added) + unlock() + + template1, found := coll.Get(ctx, hash) + assert.True(t, found) + + // get with lock + state, lockedTemplate := template1.GetStateWithLock(ctx) + assert.Equal(t, templates.TemplateStateInit, state) + + // try to get again when the template is locked + template2, found := coll.Get(ctx, hash) + assert.True(t, found) + + // assert that getting the state now won't succeed - template is locked + _, err := util.WaitWithTimeout(ctx, 100*time.Millisecond, func(ctx context.Context) (templates.TemplateState, error) { + return 
template1.GetState(ctx), nil + }) + assert.ErrorIs(t, err, util.ErrTimeout) + _, err = util.WaitWithTimeout(ctx, 100*time.Millisecond, func(ctx context.Context) (templates.TemplateState, error) { + return template2.GetState(ctx), nil + }) + assert.ErrorIs(t, err, util.ErrTimeout) + + // now set the new state and unlock the locked template + lockedTemplate.SetState(ctx, templates.TemplateStateDiscarded) + lockedTemplate.Unlock() + lockedTemplate.Unlock() + + assert.Equal(t, templates.TemplateStateDiscarded, template2.GetState(ctx)) + + // make sure that the template is still in the collection + template3, found := coll.Get(ctx, hash) + assert.True(t, found) + assert.Equal(t, "ich", template3.Config.Username) +} From 5e8c4e7839e24c9d81ae7734cf988ccb0be641b8 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 30 Jun 2023 09:30:10 +0200 Subject: [PATCH 071/160] use distroless/base-debian11:debug image --- Dockerfile | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1e46759..638eca9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -141,8 +141,11 @@ RUN make build # --- Stage: integresql ### ----------------------- -# https://github.com/GoogleContainerTools/distroless -FROM gcr.io/distroless/base as integresql +# Distroless images are minimal and lack shell access. +# https://github.com/GoogleContainerTools/distroless/blob/master/base/README.md +# The :debug image provides a busybox shell to enter. 
+# https://github.com/GoogleContainerTools/distroless#debug-images +FROM gcr.io/distroless/base-debian11:debug as integresql COPY --from=builder-integresql /app/bin/integresql / # Note that cmd is not supported with these kind of images, no shell included # see https://github.com/GoogleContainerTools/distroless/issues/62 From 8764d0440e9e4f2c31266ea7a844aca49433fc49 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 09:28:51 +0000 Subject: [PATCH 072/160] add drone file --- .drone.yml | 417 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 417 insertions(+) create mode 100644 .drone.yml diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 0000000..8997be3 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,417 @@ +# ----------------------------------------------------------------------------- +# SETTINGS +# ----------------------------------------------------------------------------- + +# Drone matrix: Additional ENV vars for substitution - http://docs.drone.io/matrix-builds/ +# Will be evaluated BEFORE the YAML is parsed, ONLY strings allowed, NO substitutions ${XXX} here. + +matrix: + include: + - BUILD_ENV: all + # The name of the k8s namespaces that these pipelines will target. + # K8S_DEPLOY_NS_DEV: + # K8S_DEPLOY_NS_STAGING: + # K8S_DEPLOY_NS_PRODUCTION: + +# YAML Configuration anchors - https://learnxinyminutes.com/docs/yaml/ +# Will be evaluated WHILE the YAML is parsed, any valid yaml allowed, substitutions ${XXX} allowed. + +alias: + # The image will be tagged with this, pushed to gcr and referenced with this key in the k8s deployment + - &IMAGE_DEPLOY_TAG ${DRONE_COMMIT_SHA} + + # The image name, defaults to lowercase repo name /, e.g. 
aw/aaa-cab-kubernetes-test + - &IMAGE_DEPLOY_NAME ${DRONE_REPO,,} + + # The intermediate builder image name + - &IMAGE_BUILDER_ID ${DRONE_REPO,,}-builder:${DRONE_COMMIT_SHA} + + # The full uniquely tagged app image name + - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA} + + # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) + - &GCR_PUBLISH_BRANCHES [dev, staging, master] + + # # Docker registry publish default settings + # - &GCR_REGISTRY_SETTINGS + # image: plugins/gcr + # repo: a3cloud-192413/${DRONE_REPO,,} + # registry: eu.gcr.io + # secrets: + # - source: AAA_GCR_SERVICE_ACCOUNT_JSON + # target: google_credentials + # # local short-time-cache: don't cleanup any image layers after pushing + # purge: false + # # force compress of docker build context + # compress: true + # volumes: # mount needed to push the already build container + # - /var/run/docker.sock:/var/run/docker.sock + + # # Deployment default settings + # - &K8S_DEPLOY_SETTINGS + # image: eu.gcr.io/a3cloud-192413/aw/aaa-drone-kubernetes:latest + # pull: true + # secrets: + # - source: AAA_K8S_SERVER + # target: KUBERNETES_SERVER + # - source: AAA_K8S_SERVICE_ACCOUNT_CRT + # target: KUBERNETES_CERT + # - source: AAA_K8S_SERVICE_ACCOUNT_TOKEN + # target: KUBERNETES_TOKEN + # - source: AAA_GCR_SERVICE_ACCOUNT_JSON + # target: GCR_SERVICE_ACCOUNT + # deployment: app + # repo: eu.gcr.io/a3cloud-192413/${DRONE_REPO,,} + # container: [app] + # tag: *IMAGE_DEPLOY_TAG + # gcr_service_account_email: drone-ci-a3cloud@a3cloud-192413.iam.gserviceaccount.com + # mgmt_repo: https://git.allaboutapps.at/scm/aw/a3cloud-mgmt.git + # mgmt_git_email: infrastructure+drone@allaboutapps.at + + # ENV variables for executing within the test env (similar to the env in docker-compose.yml) + - &TEST_ENV + CI: ${CI} + + # required: env for main working database, service + # default for sql-migrate (target development) and psql cli tool + PGDATABASE: &PGDATABASE 
"development" + PGUSER: &PGUSER "dbuser" + PGPASSWORD: &PGPASSWORD "dbpass" + PGHOST: &PGHOST "postgres" + PGPORT: &PGPORT "5432" + PGSSLMODE: &PGSSLMODE "disable" + + # optional: env for sql-boiler (ability to generate models out of a "spec" database) + # sql-boiler should operate on a "spec" database only + PSQL_DBNAME: "spec" + PSQL_USER: *PGUSER + PSQL_PASS: *PGPASSWORD + PSQL_HOST: *PGHOST + PSQL_PORT: *PGPORT + PSQL_SSLMODE: *PGSSLMODE + + # required for drone: project root directory, used for relative path resolution (e.g. fixtures) + PROJECT_ROOT_DIR: /app + + # docker run related. + SERVER_MANAGEMENT_SECRET: "mgmt-secret" + + # Which build events should trigger the main pipeline (defaults to all) + - &BUILD_EVENTS [push, pull_request, tag] + + # Pipeline merge helper: only execute if build event received + - &WHEN_BUILD_EVENT + when: + event: *BUILD_EVENTS + +# The actual pipeline building our product +pipeline: + # --------------------------------------------------------------------------- + # BUILD + # --------------------------------------------------------------------------- + + "database connection": + group: build + image: postgres:12.4-alpine # should be the same version as used in .drone.yml, .github/workflows, Dockerfile and live + commands: + # wait for postgres service to become available + - | + until psql -U $PGUSER -d $PGDATABASE -h postgres \ + -c "SELECT 1;" >/dev/null 2>&1; do sleep 1; done + # query the database + - | + psql -U $PGUSER -d $PGDATABASE -h postgres \ + -c "SELECT name, setting FROM pg_settings;" + environment: *TEST_ENV + <<: *WHEN_BUILD_EVENT + + "docker build (target builder)": + group: build + image: docker:latest + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + IMAGE_TAG: *IMAGE_BUILDER_ID + commands: + - "docker build --target builder --compress -t $${IMAGE_TAG} ." 
+ <<: *WHEN_BUILD_EVENT + + "docker build (target app)": + group: build-app + image: docker:latest + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + IMAGE_TAG: *IMAGE_DEPLOY_ID + commands: + - "docker build --target app --compress -t $${IMAGE_TAG} ." + <<: *WHEN_BUILD_EVENT + + # --------------------------------------------------------------------------- + # CHECK + # --------------------------------------------------------------------------- + + "trivy scan": + group: pre-test + image: aquasec/trivy:latest + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /server/drone/trivy-cache:/root/.cache/ + environment: + IMAGE_TAG: *IMAGE_DEPLOY_ID + commands: + # Print report + - "trivy image --exit-code 0 --no-progress $${IMAGE_TAG}" + # Fail on severity HIGH and CRITICAL + - "trivy image --exit-code 1 --severity HIGH,CRITICAL --no-progress --ignore-unfixed $${IMAGE_TAG}" + <<: *WHEN_BUILD_EVENT + + "build & diff": + group: pre-test + image: *IMAGE_BUILDER_ID + commands: + - cd $PROJECT_ROOT_DIR # reuse go build cache from Dockerfile builder stage + - make tidy + - make build + - /bin/cp -Rf $PROJECT_ROOT_DIR/* $DRONE_WORKSPACE # switch back to drone workspace ... + - cd $DRONE_WORKSPACE + - "git diff --exit-code" # ... 
for git diffing (otherwise not possible as .git is .dockerignored) + environment: *TEST_ENV + <<: *WHEN_BUILD_EVENT + + "info": + group: test + image: *IMAGE_BUILDER_ID + commands: + - cd $PROJECT_ROOT_DIR # reuse go build cache from Dockerfile builder stage + - make info + environment: *TEST_ENV + <<: *WHEN_BUILD_EVENT + + "test": + group: test + image: *IMAGE_BUILDER_ID + commands: + - cd $PROJECT_ROOT_DIR # reuse go build cache from Dockerfile builder stage + - make test + environment: *TEST_ENV + <<: *WHEN_BUILD_EVENT + + # "swagger-codegen-cli": + # group: test + # # https://github.com/swagger-api/swagger-codegen/blob/master/modules/swagger-codegen-cli/Dockerfile + # image: swaggerapi/swagger-codegen-cli + # commands: + # # run the main swagger.yml validation. + # - "java -jar /opt/swagger-codegen-cli/swagger-codegen-cli.jar validate -i ./api/swagger.yml" + # <<: *WHEN_BUILD_EVENT + + "binary: deps": + group: test + image: *IMAGE_BUILDER_ID + commands: + - cd $PROJECT_ROOT_DIR + - make get-embedded-modules-count + - make get-embedded-modules + environment: *TEST_ENV + <<: *WHEN_BUILD_EVENT + + "binary: licenses": + group: test + image: *IMAGE_BUILDER_ID + commands: + - cd $PROJECT_ROOT_DIR + - make get-licenses + environment: *TEST_ENV + <<: *WHEN_BUILD_EVENT + + "docker run (target app)": + group: test + image: docker:latest + volumes: + - /var/run/docker.sock:/var/run/docker.sock + environment: + <<: *TEST_ENV + IMAGE_TAG: *IMAGE_DEPLOY_ID + commands: + # Note: NO network related tests are possible here, dnd can just + # run sibling containers. We have no possibility to connect them + # into the drone user defined per build docker network! 
+ # https://github.com/drone-plugins/drone-docker/issues/193 + # https://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/ + - (env | grep "^\S*=" | grep -v -e "DRONE=" -e "DRONE_" -e "CI_" -e "CI=" -e "HOME=" -e "HOSTNAME=" -e "SHELL=" -e "PWD=" -e "PATH=") > .hostenv + - cat .hostenv + - "docker run --env-file .hostenv $${IMAGE_TAG} help" + - "docker run --env-file .hostenv $${IMAGE_TAG} -v" + - "docker run --env-file .hostenv $${IMAGE_TAG} env" + <<: *WHEN_BUILD_EVENT + + # --------------------------------------------------------------------------- + # PUBLISH + # --------------------------------------------------------------------------- + + # Built a allowed branch? Push to cloud registry + "publish ${DRONE_BRANCH}_${DRONE_COMMIT_SHA:0:10}": + group: publish + <<: *GCR_REGISTRY_SETTINGS + tags: + - build_${DRONE_BUILD_NUMBER} + - ${DRONE_BRANCH/\//-}_${DRONE_COMMIT_SHA:0:10} + - *IMAGE_DEPLOY_TAG + - latest + - ${DRONE_BRANCH/\//-} + - '${DRONE_COMMIT_SHA:0:10}' + when: + branch: *GCR_PUBLISH_BRANCHES + event: *BUILD_EVENTS + + # Built a tag? 
Push to cloud registry + "publish tag_${DRONE_COMMIT_SHA:0:10}": + group: publish + <<: *GCR_REGISTRY_SETTINGS + tags: + - build_${DRONE_BUILD_NUMBER} + - tag_${DRONE_COMMIT_SHA:0:10} + - *IMAGE_DEPLOY_TAG + - latest + - ${DRONE_TAG} + - ${DRONE_COMMIT_SHA:0:10} + when: + event: tag + + # --------------------------------------------------------------------------- + # DEPLOYMENT + # --------------------------------------------------------------------------- + + # # autodeploy dev if it hits the branch + # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_DEV} (auto)": + # <<: *K8S_DEPLOY_SETTINGS + # namespace: ${K8S_DEPLOY_NS_DEV} + # mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_DEV}/app.deployment.yaml + # when: + # event: *BUILD_EVENTS + # branch: [dev] + + # # promote dev through "drone deploy dev" + # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_DEV} (promote)": + # <<: *K8S_DEPLOY_SETTINGS + # namespace: ${K8S_DEPLOY_NS_DEV} + # mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_DEV}/app.deployment.yaml + # when: + # environment: dev + # event: deployment + + # # autodeploy staging if it hits the branch + # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_STAGING} (auto)": + # <<: *K8S_DEPLOY_SETTINGS + # namespace: ${K8S_DEPLOY_NS_STAGING} + # mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_STAGING}/app.deployment.yaml + # when: + # event: *BUILD_EVENTS + # branch: [staging] + + # # promote staging through "drone deploy staging" + # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_STAGING} (promote)": + # <<: *K8S_DEPLOY_SETTINGS + # namespace: ${K8S_DEPLOY_NS_STAGING} + # mgmt_deployment_yaml: namespaces/${K8S_DEPLOY_NS_STAGING}/app.deployment.yaml + # when: + # environment: staging + # event: deployment + + # # promote production through "drone deploy production" + # "deploy ${DRONE_COMMIT_SHA:0:10} to ${K8S_DEPLOY_NS_PRODUCTION} (promote)": + # <<: *K8S_DEPLOY_SETTINGS + # namespace: ${K8S_DEPLOY_NS_PRODUCTION} + # mgmt_deployment_yaml: 
namespaces/${K8S_DEPLOY_NS_PRODUCTION}/app.deployment.yaml + # when: + # environment: production + # event: deployment + + # --------------------------------------------------------------------------- + # DEPLOYMENT go-starter + # Purpose: go-starter drone specific publish and deployment steps + # NOTE: you do not need to uncomment them for our customer projects + # These steps won't be executed unless we work in the main "AW/go-starter" repo + # --------------------------------------------------------------------------- + + "go-starter publish ${DRONE_BRANCH}_${DRONE_COMMIT_SHA:0:10}": + group: go-starter-publish + image: plugins/gcr + repo: a3cloud-192413/${DRONE_REPO,,} + registry: eu.gcr.io + secrets: + - source: AAA_GCR_SERVICE_ACCOUNT_JSON + target: google_credentials + # local short-time-cache: don't cleanup any image layers after pushing + purge: false + # force compress of docker build context + compress: true + volumes: # mount needed to push the already build container + - /var/run/docker.sock:/var/run/docker.sock + tags: + - build_${DRONE_BUILD_NUMBER} + - ${DRONE_BRANCH/\//-}_${DRONE_COMMIT_SHA:0:10} + - *IMAGE_DEPLOY_TAG + - latest + - ${DRONE_BRANCH/\//-} + - "${DRONE_COMMIT_SHA:0:10}" + when: + repo: AW/go-starter + branch: [master, mr/a3cloud, mr/liveness-probing] + event: [push, pull_request, tag] + + "go-starter deploy ${DRONE_COMMIT_SHA:0:10} to allaboutapps-go-starter-dev (auto)": + group: go-starter-deploy + image: eu.gcr.io/a3cloud-192413/aw/aaa-drone-kubernetes:latest + pull: true + secrets: + - source: AAA_K8S_SERVER + target: KUBERNETES_SERVER + - source: AAA_K8S_SERVICE_ACCOUNT_CRT + target: KUBERNETES_CERT + - source: AAA_K8S_SERVICE_ACCOUNT_TOKEN + target: KUBERNETES_TOKEN + - source: AAA_GCR_SERVICE_ACCOUNT_JSON + target: GCR_SERVICE_ACCOUNT + deployment: app + repo: eu.gcr.io/a3cloud-192413/${DRONE_REPO,,} + container: [app] + tag: *IMAGE_DEPLOY_TAG + gcr_service_account_email: drone-ci-a3cloud@a3cloud-192413.iam.gserviceaccount.com + 
mgmt_repo: https://git.allaboutapps.at/scm/aw/a3cloud-mgmt.git + mgmt_git_email: infrastructure+drone@allaboutapps.at + namespace: allaboutapps-go-starter-dev + mgmt_deployment_yaml: namespaces/allaboutapps-go-starter-dev/app.deployment.yaml + when: + repo: AW/go-starter + branch: [master, mr/a3cloud, mr/liveness-probing] + event: [push, pull_request, tag] + +# Long living services where the startup order does not matter (otherwise use detach: true) +services: + # --------------------------------------------------------------------------- + # SERVICES + # --------------------------------------------------------------------------- + + "env": + image: alpine + commands: + - "env | sort" + + "postgres": + image: postgres:12.4-alpine # should be the same version as used in .drone.yml, .github/workflows, Dockerfile and live + environment: + POSTGRES_DB: *PGDATABASE + POSTGRES_USER: *PGUSER + POSTGRES_PASSWORD: *PGPASSWORD + # ATTENTION + # fsync=off, synchronous_commit=off and full_page_writes=off + # gives us a major speed up during local development and testing (~30%), + # however you should NEVER use these settings in PRODUCTION unless + # you want to have CORRUPTED data. + # DO NOT COPY/PASTE THIS BLINDLY. + # YOU HAVE BEEN WARNED. 
+ # Apply some performance improvements to pg as these guarantees are not needed while running integration tests + command: "-c 'shared_buffers=128MB' -c 'fsync=off' -c 'synchronous_commit=off' -c 'full_page_writes=off' -c 'max_connections=100' -c 'client_min_messages=warning'" + <<: *WHEN_BUILD_EVENT From 33d1ee7cbdd7987f710bf67ce51220112f871b42 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 10:15:08 +0000 Subject: [PATCH 073/160] fix indentation --- .drone.yml | 60 +++++++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/.drone.yml b/.drone.yml index 8997be3..d8e2771 100644 --- a/.drone.yml +++ b/.drone.yml @@ -29,8 +29,8 @@ alias: # The full uniquely tagged app image name - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA} - # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) - - &GCR_PUBLISH_BRANCHES [dev, staging, master] + # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) + - &GCR_PUBLISH_BRANCHES [dev, staging, master] # # Docker registry publish default settings # - &GCR_REGISTRY_SETTINGS @@ -250,34 +250,34 @@ pipeline: # PUBLISH # --------------------------------------------------------------------------- - # Built a allowed branch? Push to cloud registry - "publish ${DRONE_BRANCH}_${DRONE_COMMIT_SHA:0:10}": - group: publish - <<: *GCR_REGISTRY_SETTINGS - tags: - - build_${DRONE_BUILD_NUMBER} - - ${DRONE_BRANCH/\//-}_${DRONE_COMMIT_SHA:0:10} - - *IMAGE_DEPLOY_TAG - - latest - - ${DRONE_BRANCH/\//-} - - '${DRONE_COMMIT_SHA:0:10}' - when: - branch: *GCR_PUBLISH_BRANCHES - event: *BUILD_EVENTS - - # Built a tag? 
Push to cloud registry - "publish tag_${DRONE_COMMIT_SHA:0:10}": - group: publish - <<: *GCR_REGISTRY_SETTINGS - tags: - - build_${DRONE_BUILD_NUMBER} - - tag_${DRONE_COMMIT_SHA:0:10} - - *IMAGE_DEPLOY_TAG - - latest - - ${DRONE_TAG} - - ${DRONE_COMMIT_SHA:0:10} - when: - event: tag + # Built a allowed branch? Push to cloud registry + "publish ${DRONE_BRANCH}_${DRONE_COMMIT_SHA:0:10}": + group: publish + <<: *GCR_REGISTRY_SETTINGS + tags: + - build_${DRONE_BUILD_NUMBER} + - ${DRONE_BRANCH/\//-}_${DRONE_COMMIT_SHA:0:10} + - *IMAGE_DEPLOY_TAG + - latest + - ${DRONE_BRANCH/\//-} + - '${DRONE_COMMIT_SHA:0:10}' + when: + branch: *GCR_PUBLISH_BRANCHES + event: *BUILD_EVENTS + + # Built a tag? Push to cloud registry + "publish tag_${DRONE_COMMIT_SHA:0:10}": + group: publish + <<: *GCR_REGISTRY_SETTINGS + tags: + - build_${DRONE_BUILD_NUMBER} + - tag_${DRONE_COMMIT_SHA:0:10} + - *IMAGE_DEPLOY_TAG + - latest + - ${DRONE_TAG} + - ${DRONE_COMMIT_SHA:0:10} + when: + event: tag # --------------------------------------------------------------------------- # DEPLOYMENT From 448bf34d64310f12a528793bdb5fccbdaecbc497 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 10:21:12 +0000 Subject: [PATCH 074/160] remove go-starter steps --- .drone.yml | 60 ------------------------------------------------------ 1 file changed, 60 deletions(-) diff --git a/.drone.yml b/.drone.yml index d8e2771..e7a3c07 100644 --- a/.drone.yml +++ b/.drone.yml @@ -328,66 +328,6 @@ pipeline: # environment: production # event: deployment - # --------------------------------------------------------------------------- - # DEPLOYMENT go-starter - # Purpose: go-starter drone specific publish and deployment steps - # NOTE: you do not need to uncomment them for our customer projects - # These steps won't be executed unless we work in the main "AW/go-starter" repo - # --------------------------------------------------------------------------- - - "go-starter publish 
${DRONE_BRANCH}_${DRONE_COMMIT_SHA:0:10}": - group: go-starter-publish - image: plugins/gcr - repo: a3cloud-192413/${DRONE_REPO,,} - registry: eu.gcr.io - secrets: - - source: AAA_GCR_SERVICE_ACCOUNT_JSON - target: google_credentials - # local short-time-cache: don't cleanup any image layers after pushing - purge: false - # force compress of docker build context - compress: true - volumes: # mount needed to push the already build container - - /var/run/docker.sock:/var/run/docker.sock - tags: - - build_${DRONE_BUILD_NUMBER} - - ${DRONE_BRANCH/\//-}_${DRONE_COMMIT_SHA:0:10} - - *IMAGE_DEPLOY_TAG - - latest - - ${DRONE_BRANCH/\//-} - - "${DRONE_COMMIT_SHA:0:10}" - when: - repo: AW/go-starter - branch: [master, mr/a3cloud, mr/liveness-probing] - event: [push, pull_request, tag] - - "go-starter deploy ${DRONE_COMMIT_SHA:0:10} to allaboutapps-go-starter-dev (auto)": - group: go-starter-deploy - image: eu.gcr.io/a3cloud-192413/aw/aaa-drone-kubernetes:latest - pull: true - secrets: - - source: AAA_K8S_SERVER - target: KUBERNETES_SERVER - - source: AAA_K8S_SERVICE_ACCOUNT_CRT - target: KUBERNETES_CERT - - source: AAA_K8S_SERVICE_ACCOUNT_TOKEN - target: KUBERNETES_TOKEN - - source: AAA_GCR_SERVICE_ACCOUNT_JSON - target: GCR_SERVICE_ACCOUNT - deployment: app - repo: eu.gcr.io/a3cloud-192413/${DRONE_REPO,,} - container: [app] - tag: *IMAGE_DEPLOY_TAG - gcr_service_account_email: drone-ci-a3cloud@a3cloud-192413.iam.gserviceaccount.com - mgmt_repo: https://git.allaboutapps.at/scm/aw/a3cloud-mgmt.git - mgmt_git_email: infrastructure+drone@allaboutapps.at - namespace: allaboutapps-go-starter-dev - mgmt_deployment_yaml: namespaces/allaboutapps-go-starter-dev/app.deployment.yaml - when: - repo: AW/go-starter - branch: [master, mr/a3cloud, mr/liveness-probing] - event: [push, pull_request, tag] - # Long living services where the startup order does not matter (otherwise use detach: true) services: # --------------------------------------------------------------------------- From 
c8d2f9759130a8acc2f455b6534934d8c5dfc747 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 10:22:16 +0000 Subject: [PATCH 075/160] uncomment GCR_REGISTRY_SETTINGS --- .drone.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/.drone.yml b/.drone.yml index e7a3c07..b7c6075 100644 --- a/.drone.yml +++ b/.drone.yml @@ -32,20 +32,20 @@ alias: # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) - &GCR_PUBLISH_BRANCHES [dev, staging, master] - # # Docker registry publish default settings - # - &GCR_REGISTRY_SETTINGS - # image: plugins/gcr - # repo: a3cloud-192413/${DRONE_REPO,,} - # registry: eu.gcr.io - # secrets: - # - source: AAA_GCR_SERVICE_ACCOUNT_JSON - # target: google_credentials - # # local short-time-cache: don't cleanup any image layers after pushing - # purge: false - # # force compress of docker build context - # compress: true - # volumes: # mount needed to push the already build container - # - /var/run/docker.sock:/var/run/docker.sock + # Docker registry publish default settings + - &GCR_REGISTRY_SETTINGS + image: plugins/gcr + repo: a3cloud-192413/${DRONE_REPO,,} + registry: eu.gcr.io + secrets: + - source: AAA_GCR_SERVICE_ACCOUNT_JSON + target: google_credentials + # local short-time-cache: don't cleanup any image layers after pushing + purge: false + # force compress of docker build context + compress: true + volumes: # mount needed to push the already build container + - /var/run/docker.sock:/var/run/docker.sock # # Deployment default settings # - &K8S_DEPLOY_SETTINGS From 313fe7f80681f20cd371636264afcd577ee40cbf Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 11:08:21 +0000 Subject: [PATCH 076/160] fix expired postgres certificate --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0b96f57..a6f031b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,8 +10,8 
@@ ENV PATH $GOBIN:$PATH # e.g. stretch=>stretch-pgdg, buster=>buster-pgdg RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ buster-pgdg main" \ | tee /etc/apt/sources.list.d/pgdg.list \ - && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc \ - | apt-key add - + && apt install curl ca-certificates gnupg \ + && curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg >/dev/null # Install required system dependencies RUN apt-get update \ @@ -49,7 +49,7 @@ RUN wget https://github.com/kyoh86/richgo/releases/download/v0.3.3/richgo_0.3.3_ RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \ | sh -s -- -b $(go env GOPATH)/bin v1.24.0 -# go swagger: (this package should NOT be installed via go get) +# go swagger: (this package should NOT be installed via go get) # https://github.com/go-swagger/go-swagger/releases RUN curl -o /usr/local/bin/swagger -L'#' \ "https://github.com/go-swagger/go-swagger/releases/download/v0.23.0/swagger_linux_amd64" \ From 3173a064fb4b26646bb2100b962e87869797e29f Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 14:24:45 +0200 Subject: [PATCH 077/160] drone: fix target name --- .drone.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index b7c6075..9f51a77 100644 --- a/.drone.yml +++ b/.drone.yml @@ -136,7 +136,7 @@ pipeline: - "docker build --target builder --compress -t $${IMAGE_TAG} ." <<: *WHEN_BUILD_EVENT - "docker build (target app)": + "docker build (target integresql)": group: build-app image: docker:latest volumes: @@ -144,7 +144,7 @@ pipeline: environment: IMAGE_TAG: *IMAGE_DEPLOY_ID commands: - - "docker build --target app --compress -t $${IMAGE_TAG} ." + - "docker build --target integresql --compress -t $${IMAGE_TAG} ." 
<<: *WHEN_BUILD_EVENT # --------------------------------------------------------------------------- From 1211a8c390cf4351ea96b13bece275750c518374 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 16:48:21 +0200 Subject: [PATCH 078/160] trivy: ignore vulnerabilities --- .trivyignore | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .trivyignore diff --git a/.trivyignore b/.trivyignore new file mode 100644 index 0000000..e6e2213 --- /dev/null +++ b/.trivyignore @@ -0,0 +1,11 @@ +# TODO: remove them all when a maintained golang image is used +CVE-2022-32149 +CVE-2021-38561 +CVE-2022-41723 +CVE-2022-27664 +CVE-2021-44716 +CVE-2021-33194 +CVE-2022-27191 +CVE-2021-43565 +CVE-2020-29652 +CVE-2022-40083 From 81a439d18adca04eb5ab4a9d30ef6eb8387f11e2 Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 15:10:59 +0000 Subject: [PATCH 079/160] add info to Makefile --- Makefile | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index aa3b0ea..a449439 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,39 @@ -# first is default task when running "make" without args -build: format gobuild lint +### ----------------------- +# --- Building +### ----------------------- + +# first is default target when running "make" without args +build: ##- Default 'make' target: go-format, go-build and lint. + @$(MAKE) format + @$(MAKE) gobuild + @$(MAKE) lint + +# useful to ensure that everything gets resetuped from scratch +all: clean init ##- Runs all of our common make targets: clean, init, build and test. + @$(MAKE) build + @$(MAKE) test + +info: info-go ##- Prints additional info + +info-go: ##- (opt) Prints go.mod updates, module-name and current go version. 
+ @echo "[go.mod]" > tmp/.info-go + @$(MAKE) get-go-outdated-modules >> tmp/.info-go + @$(MAKE) info-module-name >> tmp/.info-go + @go version >> tmp/.info-go + @cat tmp/.info-go + +# TODO: switch to "-m direct" after go 1.17 hits: https://github.com/golang/go/issues/40364 +get-go-outdated-modules: ##- (opt) Prints outdated (direct) go modules (from go.mod). + @((go list -u -m -f '{{if and .Update (not .Indirect)}}{{.}}{{end}}' all) 2>/dev/null | grep " ") || echo "go modules are up-to-date." + +info-module-name: ##- (opt) Prints current go module-name. + @echo "go module-name: '${GO_MODULE_NAME}'" + format: go fmt -gobuild: +gobuild: go build -o bin/integresql ./cmd/server lint: From a754040eebc9064d9b1166b13006f7db2bca3e6c Mon Sep 17 00:00:00 2001 From: anjankow Date: Tue, 4 Jul 2023 15:28:39 +0000 Subject: [PATCH 080/160] add tmp directory --- .gitignore | 3 +++ tmp/.gitignore | 0 2 files changed, 3 insertions(+) create mode 100644 tmp/.gitignore diff --git a/.gitignore b/.gitignore index 6ec4c1c..24c54ca 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,6 @@ bin # local go mod cache .pkg + +# temporary files +tmp \ No newline at end of file diff --git a/tmp/.gitignore b/tmp/.gitignore new file mode 100644 index 0000000..e69de29 From b622abb0318a8c2018ce72aac9cdc98bc3100d75 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 06:56:54 +0000 Subject: [PATCH 081/160] add lichen to tools --- go.mod | 2 +- go.sum | 36 ++++++++++++++++++++++++++++++++++++ tools.go | 5 +++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 6d76eaa..7662489 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.14 require ( github.com/labstack/echo/v4 v4.1.16 github.com/lib/pq v1.3.0 + github.com/uw-labs/lichen v0.1.7 // indirect golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 // indirect golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect - golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // 
indirect ) diff --git a/go.sum b/go.sum index 51a1bdc..c642730 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,25 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/licenseclassifier v0.0.0-20201113175434-78a70215ca36 h1:YGB3wNLUTvq+lbIwdNRsaMJvoX4mCKkwzHlmlT1V+ow= +github.com/google/licenseclassifier v0.0.0-20201113175434-78a70215ca36/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o= github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI= github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq 
v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -15,11 +27,29 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/muesli/termenv v0.11.0 h1:fwNUbu2mfWlgicwG7qYzs06aOI8Z/zKPAv8J4uKbT+o= +github.com/muesli/termenv v0.11.0/go.mod h1:Bd5NYQ7pd+SrtBSrSNoBBmXlcY8+Xj4BMJgh8qcZrvs= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/urfave/cli/v2 v2.4.0 h1:m2pxjjDFgDxSPtO8WSdbndj17Wu2y8vOT86wE/tjr+I= +github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg= +github.com/uw-labs/lichen v0.1.7 h1:SDNE3kThhhtP70XfLN/C2bqaT9Epefg1i10lhWYIG4g= +github.com/uw-labs/lichen v0.1.7/go.mod h1:bvEgoBeVZGhzstRxPEpEwM4TGT6AJZ6GA29a4FuLxYw= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= @@ -45,6 +75,8 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY= golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -53,3 +85,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tools.go b/tools.go index 37c9136..c984c2f 100644 --- a/tools.go +++ b/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools // Tooling dependencies @@ -9,3 +10,7 @@ // Other tooling may be installed as *static binary* directly within the Dockerfile package tools + +import ( + _ "github.com/uw-labs/lichen" +) From 1e8d0c548eabe9ace9d08e9678cbd751c9bf6f85 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 06:57:14 +0000 Subject: [PATCH 082/160] makefile: add get-embedded-modules-count and get-licenses --- Makefile | 109 ++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 100 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index a449439..8e00bff 100644 --- a/Makefile +++ b/Makefile @@ -22,14 +22,6 @@ info-go: ##- (opt) Prints go.mod updates, module-name and current go version. @go version >> tmp/.info-go @cat tmp/.info-go -# TODO: switch to "-m direct" after go 1.17 hits: https://github.com/golang/go/issues/40364 -get-go-outdated-modules: ##- (opt) Prints outdated (direct) go modules (from go.mod). 
- @((go list -u -m -f '{{if and .Update (not .Indirect)}}{{.}}{{end}}' all) 2>/dev/null | grep " ") || echo "go modules are up-to-date." - -info-module-name: ##- (opt) Prints current go module-name. - @echo "go module-name: '${GO_MODULE_NAME}'" - - format: go fmt @@ -47,6 +39,11 @@ lint: test: richgo test -cover -race -count=1 ./... +# TODO: switch to "-m direct" after go 1.17 hits: https://github.com/golang/go/issues/40364 +get-go-outdated-modules: ##- (opt) Prints outdated (direct) go modules (from go.mod). + @((go list -u -m -f '{{if and .Update (not .Indirect)}}{{.}}{{end}}' all) 2>/dev/null | grep " ") || echo "go modules are up-to-date." + + init: modules tools tidy @go version @@ -71,6 +68,100 @@ reset: psql -d postgres -c 'DROP DATABASE IF EXISTS "${PGDATABASE}";' psql -d postgres -c 'CREATE DATABASE "${PGDATABASE}" WITH OWNER ${PGUSER} TEMPLATE "template0"' +### ----------------------- +# --- Binary checks +### ----------------------- + +# Got license issues with some dependencies? Provide a custom lichen --config +# see https://github.com/uw-labs/lichen#config +get-licenses: ##- Prints licenses of embedded modules in the compiled bin/integresql. + lichen bin/integresql + +get-embedded-modules: ##- Prints embedded modules in the compiled bin/integresql. + go version -m -v bin/integresql + +get-embedded-modules-count: ##- (opt) Prints count of embedded modules in the compiled bin/integresql. + go version -m -v bin/integresql | grep $$'\tdep' | wc -l + + +### ----------------------- +# --- Helpers +### ----------------------- + +get-module-name: ##- Prints current go module-name (pipeable). + @echo "${GO_MODULE_NAME}" + +info-module-name: ##- (opt) Prints current go module-name. + @echo "go module-name: '${GO_MODULE_NAME}'" + +set-module-name: ##- Wizard to set a new go module-name. 
+ @rm -f tmp/.modulename + @$(MAKE) info-module-name + @echo "Enter new go module-name:" \ + && read new_module_name \ + && echo "new go module-name: '$${new_module_name}'" \ + && echo -n "Are you sure? [y/N]" \ + && read ans && [ $${ans:-N} = y ] \ + && echo -n "Please wait..." \ + && find . -not -path '*/\.*' -not -path './Makefile' -type f -exec sed -i "s|${GO_MODULE_NAME}|$${new_module_name}|g" {} \; \ + && echo "new go module-name: '$${new_module_name}'!" + @rm -f tmp/.modulename + +force-module-name: ##- Overwrite occurrences of 'allaboutapps.dev/aw/go-starter' with current go module-name. + find . -not -path '*/\.*' -not -path './Makefile' -type f -exec sed -i "s|allaboutapps.dev/aw/go-starter|${GO_MODULE_NAME}|g" {} \; + +get-go-ldflags: ##- (opt) Prints used -ldflags as evaluated in Makefile used in make go-build + @echo $(LDFLAGS) + +### ----------------------- +# --- Make variables +### ----------------------- + +# only evaluated if required by a recipe +# http://make.mad-scientist.net/deferred-simple-variable-expansion/ + +# go module name (as in go.mod) +GO_MODULE_NAME = $(eval GO_MODULE_NAME := $$(shell \ + (mkdir -p tmp 2> /dev/null && cat tmp/.modulename 2> /dev/null) \ + || (gsdev modulename 2> /dev/null | tee tmp/.modulename) || echo "unknown" \ +))$(GO_MODULE_NAME) + + +# https://medium.com/the-go-journey/adding-version-information-to-go-binaries-e1b79878f6f2 +ARG_COMMIT = $(eval ARG_COMMIT := $$(shell \ + (git rev-list -1 HEAD 2> /dev/null) \ + || (echo "unknown") \ +))$(ARG_COMMIT) + +ARG_BUILD_DATE = $(eval ARG_BUILD_DATE := $$(shell \ + (date -Is 2> /dev/null || date 2> /dev/null || echo "unknown") \ +))$(ARG_BUILD_DATE) + +# https://www.digitalocean.com/community/tutorials/using-ldflags-to-set-version-information-for-go-applications +LDFLAGS = $(eval LDFLAGS := "\ +-X '$(GO_MODULE_NAME)/internal/config.ModuleName=$(GO_MODULE_NAME)'\ +-X '$(GO_MODULE_NAME)/internal/config.Commit=$(ARG_COMMIT)'\ +-X 
'$(GO_MODULE_NAME)/internal/config.BuildDate=$(ARG_BUILD_DATE)'\ +")$(LDFLAGS) + +### ----------------------- +# --- Special targets +### ----------------------- + +# https://www.gnu.org/software/make/manual/html_node/Special-Targets.html # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html # ignore matching file/make rule combinations in working-dir -.PHONY: test +.PHONY: test help + +# https://unix.stackexchange.com/questions/153763/dont-stop-makeing-if-a-command-fails-but-check-exit-status +# https://www.gnu.org/software/make/manual/html_node/One-Shell.html +# required to ensure make fails if one recipe fails (even on parallel jobs) and on pipefails +# .ONESHELL: + +# # normal POSIX bash shell mode +# SHELL = /bin/bash +# .SHELLFLAGS = -cEeuo pipefail + +# # wrapped make time tracing shell, use it via MAKE_TRACE_TIME=true make +# SHELL = /app/rksh +# .SHELLFLAGS = $@ \ No newline at end of file From 35ea8a67b9cc51c86c47ecc7031f5b0e552a6b9b Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 07:00:43 +0000 Subject: [PATCH 083/160] tidy go.mod --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7662489..4e7469d 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.14 require ( github.com/labstack/echo/v4 v4.1.16 github.com/lib/pq v1.3.0 - github.com/uw-labs/lichen v0.1.7 // indirect + github.com/uw-labs/lichen v0.1.7 golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 // indirect golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect ) diff --git a/go.sum b/go.sum index c642730..971ff93 100644 --- a/go.sum +++ b/go.sum @@ -45,6 +45,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify 
v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/urfave/cli/v2 v2.4.0 h1:m2pxjjDFgDxSPtO8WSdbndj17Wu2y8vOT86wE/tjr+I= github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg= @@ -73,8 +74,6 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -88,4 +87,5 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 0fa5fba3e58bb07fbc12f7ac976008f107c103dc Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 07:11:07 +0000 Subject: 
[PATCH 084/160] drone: use builder-integresql --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 9f51a77..73c0ed7 100644 --- a/.drone.yml +++ b/.drone.yml @@ -133,7 +133,7 @@ pipeline: environment: IMAGE_TAG: *IMAGE_BUILDER_ID commands: - - "docker build --target builder --compress -t $${IMAGE_TAG} ." + - "docker build --target builder-integresql --compress -t $${IMAGE_TAG} ." <<: *WHEN_BUILD_EVENT "docker build (target integresql)": From a0d782dbb0a77cab325c8c286321a14cafcccf15 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 11:03:51 +0200 Subject: [PATCH 085/160] drone: remove docker run step --- .drone.yml | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/.drone.yml b/.drone.yml index 73c0ed7..23c4bb3 100644 --- a/.drone.yml +++ b/.drone.yml @@ -225,26 +225,26 @@ pipeline: environment: *TEST_ENV <<: *WHEN_BUILD_EVENT - "docker run (target app)": - group: test - image: docker:latest - volumes: - - /var/run/docker.sock:/var/run/docker.sock - environment: - <<: *TEST_ENV - IMAGE_TAG: *IMAGE_DEPLOY_ID - commands: - # Note: NO network related tests are possible here, dnd can just - # run sibling containers. We have no possibility to connect them - # into the drone user defined per build docker network! 
- # https://github.com/drone-plugins/drone-docker/issues/193 - # https://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/ - - (env | grep "^\S*=" | grep -v -e "DRONE=" -e "DRONE_" -e "CI_" -e "CI=" -e "HOME=" -e "HOSTNAME=" -e "SHELL=" -e "PWD=" -e "PATH=") > .hostenv - - cat .hostenv - - "docker run --env-file .hostenv $${IMAGE_TAG} help" - - "docker run --env-file .hostenv $${IMAGE_TAG} -v" - - "docker run --env-file .hostenv $${IMAGE_TAG} env" - <<: *WHEN_BUILD_EVENT + # "docker run (target app)": + # group: test + # image: docker:latest + # volumes: + # - /var/run/docker.sock:/var/run/docker.sock + # environment: + # <<: *TEST_ENV + # IMAGE_TAG: *IMAGE_DEPLOY_ID + # commands: + # # Note: NO network related tests are possible here, dnd can just + # # run sibling containers. We have no possibility to connect them + # # into the drone user defined per build docker network! + # # https://github.com/drone-plugins/drone-docker/issues/193 + # # https://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/ + # - (env | grep "^\S*=" | grep -v -e "DRONE=" -e "DRONE_" -e "CI_" -e "CI=" -e "HOME=" -e "HOSTNAME=" -e "SHELL=" -e "PWD=" -e "PATH=") > .hostenv + # - cat .hostenv + # - "docker run --env-file .hostenv $${IMAGE_TAG} help" + # - "docker run --env-file .hostenv $${IMAGE_TAG} -v" + # - "docker run --env-file .hostenv $${IMAGE_TAG} env" + # <<: *WHEN_BUILD_EVENT # --------------------------------------------------------------------------- # PUBLISH From 8c768a708044e87e83aed11970039f86d971c603 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 11:04:49 +0200 Subject: [PATCH 086/160] drone: add test branch to publish branches --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 23c4bb3..3338520 100644 --- a/.drone.yml +++ b/.drone.yml @@ -30,7 +30,7 @@ alias: - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA} # Defines which branches will trigger a docker 
image push our Google Cloud Registry (tags are always published) - - &GCR_PUBLISH_BRANCHES [dev, staging, master] + - &GCR_PUBLISH_BRANCHES [master, aj/drone-pipeline] # Docker registry publish default settings - &GCR_REGISTRY_SETTINGS From 11f00ae8ac3cb1f58886c4fc07fad30a13b3de50 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 11:12:11 +0200 Subject: [PATCH 087/160] drone: remove test branch name --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 3338520..8c228d3 100644 --- a/.drone.yml +++ b/.drone.yml @@ -30,7 +30,7 @@ alias: - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA} # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) - - &GCR_PUBLISH_BRANCHES [master, aj/drone-pipeline] + - &GCR_PUBLISH_BRANCHES [master] # Docker registry publish default settings - &GCR_REGISTRY_SETTINGS From 53f1a8053847ed917909c242ee5810f13ba6dc92 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 11:12:52 +0200 Subject: [PATCH 088/160] drone: add dev branch to publish --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 8c228d3..28f80a2 100644 --- a/.drone.yml +++ b/.drone.yml @@ -30,7 +30,7 @@ alias: - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA} # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) - - &GCR_PUBLISH_BRANCHES [master] + - &GCR_PUBLISH_BRANCHES [dev, master] # Docker registry publish default settings - &GCR_REGISTRY_SETTINGS From 61f9f304a7bc860717ebc9612b81ea4335b6d60e Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 11:18:47 +0000 Subject: [PATCH 089/160] update Makefile --- Dockerfile | 1 + Makefile | 74 +++++++++++++++++++++++++++++------------------------- 2 files changed, 41 insertions(+), 34 deletions(-) diff --git a/Dockerfile b/Dockerfile index d23384e..6d23916 100644 --- 
a/Dockerfile +++ b/Dockerfile @@ -28,6 +28,7 @@ RUN apt-get update \ locales \ sudo \ bash-completion \ + bsdmainutils \ postgresql-client-12 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/Makefile b/Makefile index b7d1df1..284edf2 100644 --- a/Makefile +++ b/Makefile @@ -56,24 +56,32 @@ get-go-outdated-modules: ##- (opt) Prints outdated (direct) go modules (from go. @((go list -u -m -f '{{if and .Update (not .Indirect)}}{{.}}{{end}}' all) 2>/dev/null | grep " ") || echo "go modules are up-to-date." -init: modules tools tidy +### ----------------------- +# --- Initializing +### ----------------------- + +init: ##- Runs make modules, tools and tidy. + @$(MAKE) modules + @$(MAKE) tools + @$(MAKE) tidy @go version # cache go modules (locally into .pkg) -modules: +modules: ##- (opt) Cache packages as specified in go.mod. go mod download # https://marcofranssen.nl/manage-go-tools-via-go-modules/ -tools: - cat tools.go | grep _ | awk -F'"' '{print $$2}' | xargs -tI % go install % +tools: ##- (opt) Install packages as specified in tools.go. + @cat tools.go | grep _ | awk -F'"' '{print $$2}' | xargs -P $$(nproc) -tI % go install % -tidy: +tidy: ##- (opt) Tidy our go.sum file. go mod tidy -clean: - rm -rf bin +### ----------------------- +# --- SQL +### ----------------------- -reset: +reset: ##- Wizard to drop and create our development database. @echo "DROP & CREATE database:" @echo " PGHOST=${PGHOST} PGDATABASE=${PGDATABASE}" PGUSER=${PGUSER} @echo -n "Are you sure? [y/N] " && read ans && [ $${ans:-N} = y ] @@ -100,44 +108,42 @@ get-embedded-modules-count: ##- (opt) Prints count of embedded modules in the co # --- Helpers ### ----------------------- +clean: ##- Cleans tmp folders. + @echo "make clean" + @rm -rf tmp/* 2> /dev/null + @rm -rf api/tmp/* 2> /dev/null + get-module-name: ##- Prints current go module-name (pipeable). @echo "${GO_MODULE_NAME}" info-module-name: ##- (opt) Prints current go module-name. 
@echo "go module-name: '${GO_MODULE_NAME}'" -set-module-name: ##- Wizard to set a new go module-name. - @rm -f tmp/.modulename - @$(MAKE) info-module-name - @echo "Enter new go module-name:" \ - && read new_module_name \ - && echo "new go module-name: '$${new_module_name}'" \ - && echo -n "Are you sure? [y/N]" \ - && read ans && [ $${ans:-N} = y ] \ - && echo -n "Please wait..." \ - && find . -not -path '*/\.*' -not -path './Makefile' -type f -exec sed -i "s|${GO_MODULE_NAME}|$${new_module_name}|g" {} \; \ - && echo "new go module-name: '$${new_module_name}'!" - @rm -f tmp/.modulename - -force-module-name: ##- Overwrite occurrences of 'allaboutapps.dev/aw/go-starter' with current go module-name. - find . -not -path '*/\.*' -not -path './Makefile' -type f -exec sed -i "s|allaboutapps.dev/aw/go-starter|${GO_MODULE_NAME}|g" {} \; - get-go-ldflags: ##- (opt) Prints used -ldflags as evaluated in Makefile used in make go-build @echo $(LDFLAGS) +# https://gist.github.com/prwhite/8168133 - based on comment from @m000 +help: ##- Show common make targets. + @echo "usage: make " + @echo "note: use 'make help-all' to see all make targets." + @echo "" + @sed -e '/#\{2\}-/!d; s/\\$$//; s/:[^#\t]*/@/; s/#\{2\}- *//' $(MAKEFILE_LIST) | grep --invert "(opt)" | sort | column -t -s '@' + +help-all: ##- Show all make targets. + @echo "usage: make " + @echo "note: make targets flagged with '(opt)' are part of a main target." 
+ @echo "" + @sed -e '/#\{2\}-/!d; s/\\$$//; s/:[^#\t]*/@/; s/#\{2\}- *//' $(MAKEFILE_LIST) | sort | column -t -s '@' + ### ----------------------- # --- Make variables ### ----------------------- -# only evaluated if required by a recipe -# http://make.mad-scientist.net/deferred-simple-variable-expansion/ - # go module name (as in go.mod) -GO_MODULE_NAME = $(eval GO_MODULE_NAME := $$(shell \ - (mkdir -p tmp 2> /dev/null && cat tmp/.modulename 2> /dev/null) \ - || (gsdev modulename 2> /dev/null | tee tmp/.modulename) || echo "unknown" \ -))$(GO_MODULE_NAME) +GO_MODULE_NAME = github.com/allaboutapps/integresql +# only evaluated if required by a recipe +# http://make.mad-scientist.net/deferred-simple-variable-expansion/ # https://medium.com/the-go-journey/adding-version-information-to-go-binaries-e1b79878f6f2 ARG_COMMIT = $(eval ARG_COMMIT := $$(shell \ @@ -168,12 +174,12 @@ LDFLAGS = $(eval LDFLAGS := "\ # https://unix.stackexchange.com/questions/153763/dont-stop-makeing-if-a-command-fails-but-check-exit-status # https://www.gnu.org/software/make/manual/html_node/One-Shell.html # required to ensure make fails if one recipe fails (even on parallel jobs) and on pipefails -# .ONESHELL: +.ONESHELL: # # normal POSIX bash shell mode # SHELL = /bin/bash # .SHELLFLAGS = -cEeuo pipefail -# # wrapped make time tracing shell, use it via MAKE_TRACE_TIME=true make -# SHELL = /app/rksh +# wrapped make time tracing shell, use it via MAKE_TRACE_TIME=true make +# SHELL = /bin/rksh # .SHELLFLAGS = $@ \ No newline at end of file From c79532d6bb27cca0b16e55c8516968ff2761b309 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 11:19:27 +0000 Subject: [PATCH 090/160] trivy: remove ignored vulnerabilities --- .trivyignore | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.trivyignore b/.trivyignore index e6e2213..e69de29 100644 --- a/.trivyignore +++ b/.trivyignore @@ -1,11 +0,0 @@ -# TODO: remove them all when a maintained golang image is used -CVE-2022-32149 
-CVE-2021-38561 -CVE-2022-41723 -CVE-2022-27664 -CVE-2021-44716 -CVE-2021-33194 -CVE-2022-27191 -CVE-2021-43565 -CVE-2020-29652 -CVE-2022-40083 From f4f30e5bb8b050fdb54a287407054fdba3a4b760 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 11:25:33 +0000 Subject: [PATCH 091/160] update outdate packages --- go.mod | 27 ++++++++++---------- go.sum | 80 ++++++++++++++++++++++++++-------------------------------- 2 files changed, 50 insertions(+), 57 deletions(-) diff --git a/go.mod b/go.mod index 38c0d6a..4f748c6 100644 --- a/go.mod +++ b/go.mod @@ -4,26 +4,27 @@ go 1.20 require ( github.com/google/uuid v1.3.0 - github.com/labstack/echo/v4 v4.1.16 - github.com/lib/pq v1.3.0 - github.com/stretchr/testify v1.7.1 + github.com/labstack/echo/v4 v4.10.2 + github.com/lib/pq v1.10.9 + github.com/stretchr/testify v1.8.4 golang.org/x/sync v0.3.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/kr/pretty v0.2.1 // indirect - github.com/labstack/gommon v0.3.0 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/labstack/gommon v0.4.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fasttemplate v1.1.0 // indirect - golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 // indirect - golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect - golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect - golang.org/x/text v0.3.7 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect + golang.org/x/crypto v0.6.0 // indirect + golang.org/x/net v0.7.0 // 
indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/time v0.3.0 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 18557a0..5a270e2 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= @@ -10,58 +10,50 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o= -github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI= -github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= 
-github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/labstack/echo/v4 v4.10.2 h1:n1jAhnq/elIFTHr1EYpiYtyKgx4RW9ccVgkqByZaN2M= +github.com/labstack/echo/v4 v4.10.2/go.mod h1:OEyqf2//K1DFdE57vw2DRgWY0M7s65IVQO2FzvI4J5k= +github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= +github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 
h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4= -github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200420104511-884d27f42877 h1:IhZPbxNd1UjBCaD5AfpSSbJTRlp+ZSuyuH5uoksNS04= -golang.org/x/crypto v0.0.0-20200420104511-884d27f42877/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= -golang.org/x/sys 
v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 700b9e29fe5d14425c563f391608b769c47c4084 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 12:42:56 +0000 Subject: [PATCH 092/160] add TestDatabaseForceReturn config --- pkg/manager/manager.go | 4 ++-- pkg/manager/manager_config.go | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index c953b84..16ecdf9 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -316,7 +316,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData if errors.Is(err, pool.ErrTimeout) { // on timeout we can try to extend the pool ctx, task := trace.NewTask(ctx, "extend_pool_on_demand") - testDB, err = m.pool.ExtendPool(ctx, template.Database) + testDB, err = m.pool.ExtendPool(ctx, template.Database, !m.config.TestDatabaseForceReturn) task.End() } else if errors.Is(err, pool.ErrUnknownHash) { @@ -329,7 +329,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData m.pool.InitHashPool(ctx, template.Database, initDBFunc) // pool initalized, create one test db - testDB, err = m.pool.ExtendPool(ctx, template.Database) + testDB, err = m.pool.ExtendPool(ctx, template.Database, !m.config.TestDatabaseForceReturn) // // and add new test DBs in the background // m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index f66468d..face104 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -16,11 +16,12 @@ type ManagerConfig struct { TestDatabasePrefix string TestDatabaseOwner string TestDatabaseOwnerPassword string - TestDatabaseInitialPoolSize int - TestDatabaseMaxPoolSize int + TestDatabaseInitialPoolSize int // 
Initial number of read DBs prepared in background + TestDatabaseMaxPoolSize int // Maximal pool size that won't be exceeded TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool NumOfCleaningWorkers int // Number of pool workers cleaning up dirty DBs + TestDatabaseForceReturn bool // Force returning used test DBs. If set to true, error "pool full" can be returned when extending is requested and max pool size is reached. Otherwise old test DBs will be reused. } func DefaultManagerConfigFromEnv() ManagerConfig { @@ -60,5 +61,6 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 2000)), TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", 3), + TestDatabaseForceReturn: util.GetEnvAsBool("INTEGRESQL_TEST_DB_FORCE_RETURN", false), } } From 6f4eb43454bf78ba3dd131d136e833b36fd2e7fb Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 12:43:27 +0000 Subject: [PATCH 093/160] implement simple recycling of InUse db --- pkg/manager/manager_test.go | 1 + pkg/pool/pool.go | 40 ++++++++++++++++++++--- pkg/pool/pool_test.go | 63 ++++++++++++++++++++++++++++++++++++- 3 files changed, 98 insertions(+), 6 deletions(-) diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index f26b74e..2e1fe30 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -659,6 +659,7 @@ func TestManagerGetTestDatabaseExtendingPool(t *testing.T) { // should extend up to 10 on demand cfg.TestDatabaseMaxPoolSize = 10 cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond + cfg.TestDatabaseForceReturn = true m, _ := testManagerWithConfig(cfg) if err := 
m.Initialize(ctx); err != nil { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index f241ab4..77a8d6c 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -181,7 +181,7 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in // DBPool unlocked // ! - newTestDB, err := pool.extend(ctx, dbStateReady, p.dbNamePrefix) + newTestDB, err := pool.extend(ctx, dbStateReady, p.dbNamePrefix, false /* recycleNotReturned */) if err != nil { return err } @@ -194,8 +194,10 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in // AddTestDatabase adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. // The new test DB is marked as 'IsUse' and won't be picked up with GetTestDatabase, until it's returned to the pool. -// If the pool size has already reached MAX, ErrPoolFull is returned. -func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { +// recycleNotReturned is an optional flag. If set to true, when a pool size has reached MAX and extension is requested, +// not returned databases (marked as 'InUse') can be recycled. +// This flag prevents receiving error ErrPoolFull. +func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database, recycleNotReturned bool) (db.TestDatabase, error) { hash := templateDB.TemplateHash // ! @@ -216,7 +218,7 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // ! 
// because we return it right away, we treat it as 'inUse' - newTestDB, err := pool.extend(ctx, dbStateInUse, p.dbNamePrefix) + newTestDB, err := pool.extend(ctx, dbStateInUse, p.dbNamePrefix, recycleNotReturned) if err != nil { return db.TestDatabase{}, err } @@ -399,7 +401,7 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { } } -func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string) (db.TestDatabase, error) { +func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string, recycleNotReturned bool) (db.TestDatabase, error) { // ! // dbHashPool locked reg := trace.StartRegion(ctx, "extend_wait_for_lock_hash_pool") @@ -410,6 +412,12 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // get index of a next test DB - its ID index := len(pool.dbs) if index == cap(pool.dbs) { + + if recycleNotReturned { + // if recycleNotReturned is allowed, try it instead of returning error + return pool.unsafeRecycleInUseTestDB(ctx) + } + return db.TestDatabase{}, ErrPoolFull } @@ -439,6 +447,28 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // ! } +// unsafeRecycleInUseTestDB searches for a test DB that is in use and that can be dropped and recreated. +// WARNING: pool has to be already locked by a colling function! 
+func (pool *dbHashPool) unsafeRecycleInUseTestDB(ctx context.Context) (db.TestDatabase, error) { + + for id := 0; id < len(pool.dbs); id++ { + if pool.dbs[id].state == dbStateInUse { + testDB := pool.dbs[id].TestDatabase + templateDB := pool.templateDB.Config.Database + + if err := pool.recreateDB(ctx, testDB, templateDB); err != nil { + // probably still in use, we will continue to search for another ready to be recreated DB + continue + } + + return testDB, nil + } + } + + // we went through all the test DBs and none is ready to be recreated -> pool is full + return db.TestDatabase{}, ErrPoolFull +} + func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error { // stop the worker diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index ff3f3f0..f38f7c6 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -255,7 +255,7 @@ func TestPoolInit(t *testing.T) { assert.NoError(t, err) // extend pool (= add and get) - _, err = p.ExtendPool(ctx, templateDB1) + _, err = p.ExtendPool(ctx, templateDB1, false /* recycleNotReturned */) assert.NoError(t, err) } @@ -292,3 +292,64 @@ func TestPoolInit(t *testing.T) { p.Stop() } + +func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + templateDB1 := db.Database{ + TemplateHash: hash1, + Config: db.DatabaseConfig{ + Database: "h1_template", + }, + } + + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + t.Log("(re)create ", testDB.Database, ", template name: ", templateName) + return nil + } + + maxPoolSize := 40 + numOfWorkers := 1 + p := pool.NewDBPool(maxPoolSize, "test_", numOfWorkers) + p.InitHashPool(ctx, templateDB1, initFunc) + + for i := 0; i < maxPoolSize; i++ { + // add and get freshly added DB + _, err := p.ExtendPool(ctx, templateDB1, false /* recycleNotReturned */) + assert.NoError(t, err) + } + + // extend pool not allowing recycling inUse test DBs + _, err := 
p.ExtendPool(ctx, templateDB1, false /* recycleNotReturned */) + assert.ErrorIs(t, err, pool.ErrPoolFull) + + forceExtend := func(seenIDMap *sync.Map) { + newTestDB1, err := p.ExtendPool(ctx, templateDB1, true /* recycleNotReturned */) + assert.NoError(t, err) + assert.Equal(t, hash1, newTestDB1.TemplateHash) + seenIDMap.Store(newTestDB1.ID, true) + } + + // allow for recycling inUse test DBs + var wg sync.WaitGroup + seenIDMap := sync.Map{} + for i := 0; i < 3*maxPoolSize; i++ { + wg.Add(1) + go func() { + defer wg.Done() + forceExtend(&seenIDMap) + }() + } + + wg.Wait() + + for id := 0; id < maxPoolSize; id++ { + _, ok := seenIDMap.Load(id) + // every index that %5 != 0 should show up at least once + assert.Equal(t, id%5 != 0, ok, id) + } + + p.Stop() +} From b1b388d9e07e6445817103043d6af35026da7d61 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 14:24:31 +0000 Subject: [PATCH 094/160] track db creation time --- pkg/pool/pool.go | 56 +++++++++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 77a8d6c..e002334 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -51,8 +51,18 @@ func NewDBPool(maxPoolSize int, testDBNamePrefix string, numberOfWorkers int) *D // RecreateDBFunc callback executed when a pool is extended or the DB cleaned up by a worker. 
type RecreateDBFunc func(ctx context.Context, testDB db.TestDatabase, templateName string) error +func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc RecreateDBFunc) recreateTestDBFunc { + return func(ctx context.Context, testDBWrapper *existingDB) error { + testDBWrapper.createdAt = time.Now() + return userRecreateFunc(ctx, testDBWrapper.TestDatabase, templateName) + } +} + +type recreateTestDBFunc func(context.Context, *existingDB) error + type existingDB struct { - state dbState + state dbState + createdAt time.Time db.TestDatabase } @@ -62,7 +72,7 @@ type dbHashPool struct { ready chan int // ID of initalized DBs according to a template, ready to pick them up dirty chan int // ID of returned DBs, need to be recreated to reuse them - recreateDB RecreateDBFunc + recreateDB recreateTestDBFunc templateDB db.Database sync.RWMutex wg sync.WaitGroup @@ -329,7 +339,7 @@ func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Dat dbs: make([]existingDB, 0, maxPoolSize), ready: make(chan int, maxPoolSize), dirty: make(chan int, maxPoolSize), - recreateDB: recreateDB, + recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, recreateDB), templateDB: templateDB, numOfWorkers: numberOfWorkers, } @@ -349,8 +359,6 @@ func (pool *dbHashPool) enableWorker(numberOfWorkers int) { // When the DB is recreated according to a template, its index goes to the 'ready' channel. 
func (pool *dbHashPool) workerCleanUpDirtyDB() { - templateName := pool.templateDB.Config.Database - for dirtyID := range pool.dirty { if dirtyID == stopWorkerMessage { break @@ -377,7 +385,7 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { } reg := trace.StartRegion(ctx, "worker_cleanup") - if err := pool.recreateDB(ctx, testDB.TestDatabase, templateName); err != nil { + if err := pool.recreateDB(ctx, &testDB); err != nil { // TODO anna: error handling fmt.Printf("integresql: failed to clean up DB: %v\n", err) @@ -421,47 +429,47 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix return db.TestDatabase{}, ErrPoolFull } - // initalization of a new DB - newTestDB := db.TestDatabase{ - Database: db.Database{ - TemplateHash: pool.templateDB.TemplateHash, - Config: pool.templateDB.Config, + // initalization of a new DB using template config + newTestDB := existingDB{ + state: state, + createdAt: time.Now(), + TestDatabase: db.TestDatabase{ + Database: db.Database{ + TemplateHash: pool.templateDB.TemplateHash, + Config: pool.templateDB.Config, + }, + ID: index, }, - ID: index, } - // set DB name - dbName := makeDBName(testDBPrefix, pool.templateDB.TemplateHash, index) - newTestDB.Database.Config.Database = dbName + newTestDB.Database.Config.Database = makeDBName(testDBPrefix, pool.templateDB.TemplateHash, index) - templateDB := pool.templateDB.Config.Database - if err := pool.recreateDB(ctx, newTestDB, templateDB); err != nil { + if err := pool.recreateDB(ctx, &newTestDB); err != nil { return db.TestDatabase{}, err } // add new test DB to the pool - pool.dbs = append(pool.dbs, existingDB{state: state, TestDatabase: newTestDB}) + pool.dbs = append(pool.dbs, newTestDB) - return newTestDB, nil + return newTestDB.TestDatabase, nil // dbHashPool unlocked // ! } // unsafeRecycleInUseTestDB searches for a test DB that is in use and that can be dropped and recreated. -// WARNING: pool has to be already locked by a colling function! 
+// WARNING: pool has to be already locked by a calling function! func (pool *dbHashPool) unsafeRecycleInUseTestDB(ctx context.Context) (db.TestDatabase, error) { for id := 0; id < len(pool.dbs); id++ { if pool.dbs[id].state == dbStateInUse { - testDB := pool.dbs[id].TestDatabase - templateDB := pool.templateDB.Config.Database + testDB := pool.dbs[id] - if err := pool.recreateDB(ctx, testDB, templateDB); err != nil { + if err := pool.recreateDB(ctx, &testDB); err != nil { // probably still in use, we will continue to search for another ready to be recreated DB continue } - return testDB, nil + return testDB.TestDatabase, nil } } From b067fb3d01bd14d0102529a05b0529919666fe13 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Wed, 5 Jul 2023 17:14:01 +0200 Subject: [PATCH 095/160] mr/dev prepublish docker image aj/pooling-improvements --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 28f80a2..bf31896 100644 --- a/.drone.yml +++ b/.drone.yml @@ -30,7 +30,7 @@ alias: - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA} # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) - - &GCR_PUBLISH_BRANCHES [dev, master] + - &GCR_PUBLISH_BRANCHES [dev, master, mr/dev] # Docker registry publish default settings - &GCR_REGISTRY_SETTINGS From 7ddb508b66407a953442bc0ceaf9805dfe4e4ea7 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 5 Jul 2023 15:44:16 +0000 Subject: [PATCH 096/160] recycle InUse dbs in order of creation time --- pkg/pool/pool.go | 24 ++++++++++++++++++------ pkg/pool/pool_test.go | 2 +- pkg/util/sort.go | 42 ++++++++++++++++++++++++++++++++++++++++++ pkg/util/sort_test.go | 26 ++++++++++++++++++++++++++ 4 files changed, 87 insertions(+), 7 deletions(-) create mode 100644 pkg/util/sort.go create mode 100644 pkg/util/sort_test.go diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index e002334..c1fdc19 100644 --- a/pkg/pool/pool.go +++ 
b/pkg/pool/pool.go @@ -5,10 +5,12 @@ import ( "errors" "fmt" "runtime/trace" + "sort" "sync" "time" "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/util" ) var ( @@ -460,17 +462,27 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // WARNING: pool has to be already locked by a calling function! func (pool *dbHashPool) unsafeRecycleInUseTestDB(ctx context.Context) (db.TestDatabase, error) { + dbInUse := util.NewSliceToSortByTime[int]() for id := 0; id < len(pool.dbs); id++ { + testDB := pool.dbs[id] + if pool.dbs[id].state == dbStateInUse { - testDB := pool.dbs[id] + dbInUse.Add(testDB.createdAt, testDB.ID) + } + } - if err := pool.recreateDB(ctx, &testDB); err != nil { - // probably still in use, we will continue to search for another ready to be recreated DB - continue - } + sort.Sort(dbInUse) + for i := 0; i < len(dbInUse); i++ { + id := dbInUse[i].Data + testDB := pool.dbs[id] - return testDB.TestDatabase, nil + if err := pool.recreateDB(ctx, &testDB); err != nil { + // probably still in use, we will continue to search for another ready to be recreated DB + continue } + pool.dbs[id] = testDB + + return testDB.TestDatabase, nil } // we went through all the test DBs and none is ready to be recreated -> pool is full diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index f38f7c6..f51ffcb 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -348,7 +348,7 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { for id := 0; id < maxPoolSize; id++ { _, ok := seenIDMap.Load(id) // every index that %5 != 0 should show up at least once - assert.Equal(t, id%5 != 0, ok, id) + assert.True(t, ok, id) } p.Stop() diff --git a/pkg/util/sort.go b/pkg/util/sort.go new file mode 100644 index 0000000..e55cf89 --- /dev/null +++ b/pkg/util/sort.go @@ -0,0 +1,42 @@ +package util + +import ( + "time" +) + +// SliceSortedByTime keeps data that should be sorted by KeyTime +type 
SliceSortedByTime[T any] []structSortedByTime[T] + +// NewSliceToSortByTime creates a new SliceSortedByTime +func NewSliceToSortByTime[T any]() SliceSortedByTime[T] { + return SliceSortedByTime[T]{} +} + +type structSortedByTime[T any] struct { + KeyTime time.Time + Data T +} + +// Add adds a new element to the end of the slice. +// Call sort.Sort() on the slice to have it ordered. +func (s *SliceSortedByTime[T]) Add(t time.Time, data T) { + *s = append(*s, structSortedByTime[T]{ + KeyTime: t, + Data: data, + }) +} + +// Len implements sort.Interface +func (s SliceSortedByTime[T]) Len() int { + return len(s) +} + +// Less implements sort.Interface +func (s SliceSortedByTime[T]) Less(i, j int) bool { + return s[i].KeyTime.Before(s[j].KeyTime) +} + +// Swap implements sort.Interface +func (s SliceSortedByTime[T]) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/pkg/util/sort_test.go b/pkg/util/sort_test.go new file mode 100644 index 0000000..1a31f75 --- /dev/null +++ b/pkg/util/sort_test.go @@ -0,0 +1,26 @@ +package util_test + +import ( + "sort" + "testing" + "time" + + "github.com/allaboutapps/integresql/pkg/util" + "github.com/stretchr/testify/assert" +) + +func TestSliceSortedByTimeImplements(t *testing.T) { + assert.Implements(t, (*sort.Interface)(nil), new(util.SliceSortedByTime[int])) +} + +func TestSliceSortedByTimeSorted(t *testing.T) { + s := util.NewSliceToSortByTime[int]() + s.Add(time.Now().Add(time.Hour), 1) + s.Add(time.Now().Add(-time.Hour), 2) + s.Add(time.Now(), 3) + + sort.Sort(s) + assert.Equal(t, s[0].Data, 2, s[0].KeyTime) + assert.Equal(t, s[1].Data, 3, s[1].KeyTime) + assert.Equal(t, s[2].Data, 1, s[2].KeyTime) +} From 61fdb2239ca1d29daa7a0260c8f13b8142548c9e Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 6 Jul 2023 09:32:14 +0000 Subject: [PATCH 097/160] move forceDBReturn param to pool --- pkg/manager/manager.go | 19 +++++++----- pkg/pool/pool.go | 69 ++++++++++++++++++++++-------------------- pkg/pool/pool_test.go | 
28 +++++++++-------- 3 files changed, 64 insertions(+), 52 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 16ecdf9..f9257e6 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -49,11 +49,16 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { } m := &Manager{ - config: config, - db: nil, - wg: sync.WaitGroup{}, - templates: templates.NewCollection(), - pool: pool.NewDBPool(config.TestDatabaseMaxPoolSize, testDBPrefix, config.NumOfCleaningWorkers), + config: config, + db: nil, + wg: sync.WaitGroup{}, + templates: templates.NewCollection(), + pool: pool.NewDBPool( + config.TestDatabaseMaxPoolSize, + testDBPrefix, + config.NumOfCleaningWorkers, + config.TestDatabaseForceReturn, + ), connectionCtx: context.TODO(), } @@ -316,7 +321,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData if errors.Is(err, pool.ErrTimeout) { // on timeout we can try to extend the pool ctx, task := trace.NewTask(ctx, "extend_pool_on_demand") - testDB, err = m.pool.ExtendPool(ctx, template.Database, !m.config.TestDatabaseForceReturn) + testDB, err = m.pool.ExtendPool(ctx, template.Database) task.End() } else if errors.Is(err, pool.ErrUnknownHash) { @@ -329,7 +334,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData m.pool.InitHashPool(ctx, template.Database, initDBFunc) // pool initalized, create one test db - testDB, err = m.pool.ExtendPool(ctx, template.Database, !m.config.TestDatabaseForceReturn) + testDB, err = m.pool.ExtendPool(ctx, template.Database) // // and add new test DBs in the background // m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index c1fdc19..e5efd43 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -35,18 +35,22 @@ type DBPool struct { pools map[string]*dbHashPool // map[hash] mutex sync.RWMutex - maxPoolSize int - dbNamePrefix string - numOfWorkers int // 
Number of cleaning workers (each hash pool has enables this number of workers) + maxPoolSize int + dbNamePrefix string + numOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) + forceDBReturn bool // Force returning test DB. If set to false, test databases that are 'InUse' can be recycled (in not actually used). } -func NewDBPool(maxPoolSize int, testDBNamePrefix string, numberOfWorkers int) *DBPool { +// forceDBReturn set to false will allow reusing test databases that are marked as 'InUse'. +// Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. +func NewDBPool(maxPoolSize int, testDBNamePrefix string, numberOfWorkers int, forceDBReturn bool) *DBPool { return &DBPool{ pools: make(map[string]*dbHashPool), - maxPoolSize: maxPoolSize, - dbNamePrefix: testDBNamePrefix, - numOfWorkers: numberOfWorkers, + maxPoolSize: maxPoolSize, + dbNamePrefix: testDBNamePrefix, + numOfWorkers: numberOfWorkers, + forceDBReturn: forceDBReturn, } } @@ -76,9 +80,12 @@ type dbHashPool struct { recreateDB recreateTestDBFunc templateDB db.Database + + numOfWorkers int + forceDBReturn bool + sync.RWMutex - wg sync.WaitGroup - numOfWorkers int + wg sync.WaitGroup } // InitHashPool creates a new pool with a given template hash and starts the cleanup workers. 
@@ -86,12 +93,12 @@ func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initD p.mutex.Lock() defer p.mutex.Unlock() - _ = p.initHashPool(ctx, templateDB, initDBFunc) + _ = p.initHashPool(ctx, templateDB, initDBFunc, p.forceDBReturn) } -func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { +func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc, forceDBReturn bool) *dbHashPool { // create a new dbHashPool - pool := newDBHashPool(p.maxPoolSize, initDBFunc, templateDB, p.numOfWorkers) + pool := newDBHashPool(p.maxPoolSize, initDBFunc, templateDB, p.numOfWorkers, forceDBReturn) // and start the cleaning worker pool.enableWorker(p.numOfWorkers) @@ -186,14 +193,14 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in pool := p.pools[hash] if pool == nil { - pool = p.initHashPool(ctx, templateDB, initFunc) + pool = p.initHashPool(ctx, templateDB, initFunc, p.forceDBReturn) } p.mutex.Unlock() // DBPool unlocked // ! - newTestDB, err := pool.extend(ctx, dbStateReady, p.dbNamePrefix, false /* recycleNotReturned */) + newTestDB, err := pool.extend(ctx, dbStateReady, p.dbNamePrefix) if err != nil { return err } @@ -206,10 +213,7 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in // AddTestDatabase adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. // The new test DB is marked as 'IsUse' and won't be picked up with GetTestDatabase, until it's returned to the pool. -// recycleNotReturned is an optional flag. If set to true, when a pool size has reached MAX and extension is requested, -// not returned databases (marked as 'InUse') can be recycled. -// This flag prevents receiving error ErrPoolFull. 
-func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database, recycleNotReturned bool) (db.TestDatabase, error) { +func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { hash := templateDB.TemplateHash // ! @@ -230,7 +234,7 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database, recycle // ! // because we return it right away, we treat it as 'inUse' - newTestDB, err := pool.extend(ctx, dbStateInUse, p.dbNamePrefix, recycleNotReturned) + newTestDB, err := pool.extend(ctx, dbStateInUse, p.dbNamePrefix) if err != nil { return db.TestDatabase{}, err } @@ -336,14 +340,15 @@ func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) // ! } -func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Database, numberOfWorkers int) *dbHashPool { +func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Database, numberOfWorkers int, forceDBReturn bool) *dbHashPool { return &dbHashPool{ - dbs: make([]existingDB, 0, maxPoolSize), - ready: make(chan int, maxPoolSize), - dirty: make(chan int, maxPoolSize), - recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, recreateDB), - templateDB: templateDB, - numOfWorkers: numberOfWorkers, + dbs: make([]existingDB, 0, maxPoolSize), + ready: make(chan int, maxPoolSize), + dirty: make(chan int, maxPoolSize), + recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, recreateDB), + templateDB: templateDB, + numOfWorkers: numberOfWorkers, + forceDBReturn: forceDBReturn, } } @@ -352,14 +357,14 @@ func (pool *dbHashPool) enableWorker(numberOfWorkers int) { pool.wg.Add(1) go func() { defer pool.wg.Done() - pool.workerCleanUpDirtyDB() + pool.workerCleanUpReturnedDB() }() } } -// workerCleanUpDirtyDB reads 'dirty' channel and cleans up a test DB with the received index. +// workerCleanUpReturnedDB reads 'dirty' channel and cleans up a test DB with the received index. 
// When the DB is recreated according to a template, its index goes to the 'ready' channel. -func (pool *dbHashPool) workerCleanUpDirtyDB() { +func (pool *dbHashPool) workerCleanUpReturnedDB() { for dirtyID := range pool.dirty { if dirtyID == stopWorkerMessage { @@ -411,7 +416,7 @@ func (pool *dbHashPool) workerCleanUpDirtyDB() { } } -func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string, recycleNotReturned bool) (db.TestDatabase, error) { +func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string) (db.TestDatabase, error) { // ! // dbHashPool locked reg := trace.StartRegion(ctx, "extend_wait_for_lock_hash_pool") @@ -423,8 +428,8 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix index := len(pool.dbs) if index == cap(pool.dbs) { - if recycleNotReturned { - // if recycleNotReturned is allowed, try it instead of returning error + if !pool.forceDBReturn { + // if forceDBReturn is allowed, try it instead of returning error return pool.unsafeRecycleInUseTestDB(ctx) } diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index f51ffcb..36646a1 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -15,7 +15,8 @@ func TestPoolAddGet(t *testing.T) { t.Parallel() ctx := context.Background() - p := pool.NewDBPool(2, "prefix_", 4) + forceReturn := true + p := pool.NewDBPool(2, "prefix_", 4, forceReturn) hash1 := "h1" hash2 := "h2" @@ -83,7 +84,8 @@ func TestPoolAddGetConcurrent(t *testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize, "", 4) + forceReturn := true + p := pool.NewDBPool(maxPoolSize, "", 4, forceReturn) var wg sync.WaitGroup sleepDuration := 100 * time.Millisecond @@ -149,7 +151,8 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize, "", 4) + forceReturn := true + p := pool.NewDBPool(maxPoolSize, "", 4, forceReturn) var wg sync.WaitGroup @@ -203,7 +206,8 @@ func TestPoolRemoveAll(t 
*testing.T) { } maxPoolSize := 6 - p := pool.NewDBPool(maxPoolSize, "", 4) + forceReturn := true + p := pool.NewDBPool(maxPoolSize, "", 4, forceReturn) // add DBs sequentially for i := 0; i < maxPoolSize; i++ { @@ -245,7 +249,8 @@ func TestPoolInit(t *testing.T) { maxPoolSize := 100 numOfWorkers := 150 - p := pool.NewDBPool(maxPoolSize, "", numOfWorkers) + forceReturn := true + p := pool.NewDBPool(maxPoolSize, "", numOfWorkers, forceReturn) // we will test 2 ways of adding new DBs for i := 0; i < maxPoolSize/2; i++ { @@ -255,7 +260,7 @@ func TestPoolInit(t *testing.T) { assert.NoError(t, err) // extend pool (= add and get) - _, err = p.ExtendPool(ctx, templateDB1, false /* recycleNotReturned */) + _, err = p.ExtendPool(ctx, templateDB1) assert.NoError(t, err) } @@ -312,21 +317,18 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { maxPoolSize := 40 numOfWorkers := 1 - p := pool.NewDBPool(maxPoolSize, "test_", numOfWorkers) + forceReturn := false + p := pool.NewDBPool(maxPoolSize, "test_", numOfWorkers, forceReturn) p.InitHashPool(ctx, templateDB1, initFunc) for i := 0; i < maxPoolSize; i++ { // add and get freshly added DB - _, err := p.ExtendPool(ctx, templateDB1, false /* recycleNotReturned */) + _, err := p.ExtendPool(ctx, templateDB1) assert.NoError(t, err) } - // extend pool not allowing recycling inUse test DBs - _, err := p.ExtendPool(ctx, templateDB1, false /* recycleNotReturned */) - assert.ErrorIs(t, err, pool.ErrPoolFull) - forceExtend := func(seenIDMap *sync.Map) { - newTestDB1, err := p.ExtendPool(ctx, templateDB1, true /* recycleNotReturned */) + newTestDB1, err := p.ExtendPool(ctx, templateDB1) assert.NoError(t, err) assert.Equal(t, hash1, newTestDB1.TemplateHash) seenIDMap.Store(newTestDB1.ID, true) From 9e638291c487cf80c5a17c33cd10cd844c2b3cfc Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 6 Jul 2023 10:27:20 +0000 Subject: [PATCH 098/160] use cfg struct to create new pool --- pkg/manager/manager.go | 10 +++-- pkg/pool/pool.go | 54 
+++++++++++++-------------- pkg/pool/pool_test.go | 85 +++++++++++++++++++++++++++--------------- 3 files changed, 87 insertions(+), 62 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index f9257e6..2ebd970 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -54,10 +54,12 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { wg: sync.WaitGroup{}, templates: templates.NewCollection(), pool: pool.NewDBPool( - config.TestDatabaseMaxPoolSize, - testDBPrefix, - config.NumOfCleaningWorkers, - config.TestDatabaseForceReturn, + pool.PoolConfig{ + MaxPoolSize: config.TestDatabaseMaxPoolSize, + TestDBNamePrefix: testDBPrefix, + NumOfWorkers: config.NumOfCleaningWorkers, + ForceDBReturn: config.TestDatabaseForceReturn, + }, ), connectionCtx: context.TODO(), } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index e5efd43..0bf614c 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -31,26 +31,26 @@ const ( const stopWorkerMessage int = -1 +type PoolConfig struct { + MaxPoolSize int + TestDBNamePrefix string + NumOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) + ForceDBReturn bool // Force returning test DB. If set to false, test databases that are 'InUse' can be recycled (in not actually used). +} + type DBPool struct { + PoolConfig + pools map[string]*dbHashPool // map[hash] mutex sync.RWMutex - - maxPoolSize int - dbNamePrefix string - numOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) - forceDBReturn bool // Force returning test DB. If set to false, test databases that are 'InUse' can be recycled (in not actually used). } // forceDBReturn set to false will allow reusing test databases that are marked as 'InUse'. // Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. 
-func NewDBPool(maxPoolSize int, testDBNamePrefix string, numberOfWorkers int, forceDBReturn bool) *DBPool { +func NewDBPool(cfg PoolConfig) *DBPool { return &DBPool{ - pools: make(map[string]*dbHashPool), - - maxPoolSize: maxPoolSize, - dbNamePrefix: testDBNamePrefix, - numOfWorkers: numberOfWorkers, - forceDBReturn: forceDBReturn, + pools: make(map[string]*dbHashPool), + PoolConfig: cfg, } } @@ -93,14 +93,14 @@ func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initD p.mutex.Lock() defer p.mutex.Unlock() - _ = p.initHashPool(ctx, templateDB, initDBFunc, p.forceDBReturn) + _ = p.initHashPool(ctx, templateDB, initDBFunc) } -func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc, forceDBReturn bool) *dbHashPool { +func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { // create a new dbHashPool - pool := newDBHashPool(p.maxPoolSize, initDBFunc, templateDB, p.numOfWorkers, forceDBReturn) + pool := newDBHashPool(p.PoolConfig, templateDB, initDBFunc) // and start the cleaning worker - pool.enableWorker(p.numOfWorkers) + pool.enableWorker(p.NumOfWorkers) // pool is ready p.pools[pool.templateDB.TemplateHash] = pool @@ -193,14 +193,14 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in pool := p.pools[hash] if pool == nil { - pool = p.initHashPool(ctx, templateDB, initFunc, p.forceDBReturn) + pool = p.initHashPool(ctx, templateDB, initFunc) } p.mutex.Unlock() // DBPool unlocked // ! - newTestDB, err := pool.extend(ctx, dbStateReady, p.dbNamePrefix) + newTestDB, err := pool.extend(ctx, dbStateReady, p.PoolConfig.TestDBNamePrefix) if err != nil { return err } @@ -234,7 +234,7 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // ! 
// because we return it right away, we treat it as 'inUse' - newTestDB, err := pool.extend(ctx, dbStateInUse, p.dbNamePrefix) + newTestDB, err := pool.extend(ctx, dbStateInUse, p.PoolConfig.TestDBNamePrefix) if err != nil { return db.TestDatabase{}, err } @@ -340,15 +340,15 @@ func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) // ! } -func newDBHashPool(maxPoolSize int, recreateDB RecreateDBFunc, templateDB db.Database, numberOfWorkers int, forceDBReturn bool) *dbHashPool { +func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { return &dbHashPool{ - dbs: make([]existingDB, 0, maxPoolSize), - ready: make(chan int, maxPoolSize), - dirty: make(chan int, maxPoolSize), - recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, recreateDB), + dbs: make([]existingDB, 0, cfg.MaxPoolSize), + ready: make(chan int, cfg.MaxPoolSize), + dirty: make(chan int, cfg.MaxPoolSize), + recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), templateDB: templateDB, - numOfWorkers: numberOfWorkers, - forceDBReturn: forceDBReturn, + numOfWorkers: cfg.NumOfWorkers, + forceDBReturn: cfg.ForceDBReturn, } } @@ -539,7 +539,7 @@ func (p *DBPool) MakeDBName(hash string, id int) string { p.mutex.RLock() p.mutex.RUnlock() - return makeDBName(p.dbNamePrefix, hash, id) + return makeDBName(p.PoolConfig.TestDBNamePrefix, hash, id) } func makeDBName(testDBPrefix string, hash string, id int) string { diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index 36646a1..3f0165b 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -15,8 +15,13 @@ func TestPoolAddGet(t *testing.T) { t.Parallel() ctx := context.Background() - forceReturn := true - p := pool.NewDBPool(2, "prefix_", 4, forceReturn) + cfg := pool.PoolConfig{ + MaxPoolSize: 2, + NumOfWorkers: 4, + TestDBNamePrefix: "prefix_", + ForceDBReturn: true, + } + p := pool.NewDBPool(cfg) hash1 := "h1" hash2 := "h2" @@ -83,9 
+88,13 @@ func TestPoolAddGetConcurrent(t *testing.T) { return nil } - maxPoolSize := 6 - forceReturn := true - p := pool.NewDBPool(maxPoolSize, "", 4, forceReturn) + cfg := pool.PoolConfig{ + MaxPoolSize: 6, + NumOfWorkers: 4, + TestDBNamePrefix: "", + ForceDBReturn: true, + } + p := pool.NewDBPool(cfg) var wg sync.WaitGroup sleepDuration := 100 * time.Millisecond @@ -104,7 +113,7 @@ func TestPoolAddGetConcurrent(t *testing.T) { sleepDuration := sleepDuration // add DBs sequentially - for i := 0; i < maxPoolSize; i++ { + for i := 0; i < cfg.MaxPoolSize; i++ { assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) time.Sleep(sleepDuration) @@ -117,13 +126,13 @@ func TestPoolAddGetConcurrent(t *testing.T) { sleepDuration := sleepDuration - db, err := p.GetTestDatabase(ctx, hash, time.Duration(maxPoolSize)*sleepDuration) + db, err := p.GetTestDatabase(ctx, hash, time.Duration(cfg.MaxPoolSize)*sleepDuration) assert.NoError(t, err) assert.Equal(t, hash, db.TemplateHash) t.Logf("got %s %v\n", db.TemplateHash, db.ID) } - for i := 0; i < maxPoolSize; i++ { + for i := 0; i < cfg.MaxPoolSize; i++ { wg.Add(2) go getDB(hash1) go getDB(hash2) @@ -150,14 +159,18 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { return nil } - maxPoolSize := 6 - forceReturn := true - p := pool.NewDBPool(maxPoolSize, "", 4, forceReturn) + cfg := pool.PoolConfig{ + MaxPoolSize: 6, + NumOfWorkers: 4, + TestDBNamePrefix: "", + ForceDBReturn: true, + } + p := pool.NewDBPool(cfg) var wg sync.WaitGroup // add DBs sequentially - for i := 0; i < maxPoolSize/2; i++ { + for i := 0; i < cfg.MaxPoolSize/2; i++ { assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) } @@ -174,7 +187,7 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { assert.NoError(t, p.ReturnTestDatabase(ctx, hash, db.ID)) } - for i := 0; i < maxPoolSize*3; i++ { + for i := 0; i < 
cfg.MaxPoolSize*3; i++ { wg.Add(2) go getAndReturnDB(hash1) go getAndReturnDB(hash2) @@ -205,12 +218,16 @@ func TestPoolRemoveAll(t *testing.T) { return nil } - maxPoolSize := 6 - forceReturn := true - p := pool.NewDBPool(maxPoolSize, "", 4, forceReturn) + cfg := pool.PoolConfig{ + MaxPoolSize: 6, + NumOfWorkers: 4, + TestDBNamePrefix: "", + ForceDBReturn: true, + } + p := pool.NewDBPool(cfg) // add DBs sequentially - for i := 0; i < maxPoolSize; i++ { + for i := 0; i < cfg.MaxPoolSize; i++ { assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) } @@ -247,13 +264,16 @@ func TestPoolInit(t *testing.T) { return nil } - maxPoolSize := 100 - numOfWorkers := 150 - forceReturn := true - p := pool.NewDBPool(maxPoolSize, "", numOfWorkers, forceReturn) + cfg := pool.PoolConfig{ + MaxPoolSize: 100, + NumOfWorkers: 150, + TestDBNamePrefix: "", + ForceDBReturn: true, + } + p := pool.NewDBPool(cfg) // we will test 2 ways of adding new DBs - for i := 0; i < maxPoolSize/2; i++ { + for i := 0; i < cfg.MaxPoolSize/2; i++ { // add and get freshly added DB assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) _, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) @@ -273,7 +293,7 @@ func TestPoolInit(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - maxPoolSize := maxPoolSize + maxPoolSize := cfg.MaxPoolSize templateHash := templateDB1.TemplateHash for i := 0; i < maxPoolSize; i++ { assert.NoError(t, p.ReturnTestDatabase(ctx, templateHash, i)) @@ -285,7 +305,7 @@ func TestPoolInit(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - maxPoolSize := maxPoolSize + maxPoolSize := cfg.MaxPoolSize templateHash := templateDB1.TemplateHash for i := 0; i < maxPoolSize; i++ { _, err := p.GetTestDatabase(ctx, templateHash, 10*time.Millisecond) @@ -315,13 +335,16 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { return nil } - maxPoolSize := 40 - numOfWorkers := 1 - 
forceReturn := false - p := pool.NewDBPool(maxPoolSize, "test_", numOfWorkers, forceReturn) + cfg := pool.PoolConfig{ + MaxPoolSize: 40, + NumOfWorkers: 1, + TestDBNamePrefix: "test_", + ForceDBReturn: false, + } + p := pool.NewDBPool(cfg) p.InitHashPool(ctx, templateDB1, initFunc) - for i := 0; i < maxPoolSize; i++ { + for i := 0; i < cfg.MaxPoolSize; i++ { // add and get freshly added DB _, err := p.ExtendPool(ctx, templateDB1) assert.NoError(t, err) @@ -337,7 +360,7 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { // allow for recycling inUse test DBs var wg sync.WaitGroup seenIDMap := sync.Map{} - for i := 0; i < 3*maxPoolSize; i++ { + for i := 0; i < 3*cfg.MaxPoolSize; i++ { wg.Add(1) go func() { defer wg.Done() @@ -347,7 +370,7 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { wg.Wait() - for id := 0; id < maxPoolSize; id++ { + for id := 0; id < cfg.MaxPoolSize; id++ { _, ok := seenIDMap.Load(id) // every index that %5 != 0 should show up at least once assert.True(t, ok, id) From a2238554fb2a00dfdd26916bd60d0ef3047324a4 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 7 Jul 2023 09:33:38 +0000 Subject: [PATCH 099/160] drone: publish image --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 28f80a2..20a5c9a 100644 --- a/.drone.yml +++ b/.drone.yml @@ -30,7 +30,7 @@ alias: - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA} # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) - - &GCR_PUBLISH_BRANCHES [dev, master] + - &GCR_PUBLISH_BRANCHES [dev, master, aj/pooling-improvements] # Docker registry publish default settings - &GCR_REGISTRY_SETTINGS From 3e7ff61bdc8cbc3cecf07b292e32e8b1133fec8c Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 7 Jul 2023 13:03:51 +0000 Subject: [PATCH 100/160] manager: create new db in background on each get request --- pkg/manager/manager.go | 27 ++++++++++++++------------- 1 file changed, 
14 insertions(+), 13 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 2ebd970..5dfd956 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -283,10 +283,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db } // Init a pool with this hash - initDBFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) - } - m.pool.InitHashPool(ctx, template.Database, initDBFunc) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestDB) lockedTemplate.SetState(ctx, templates.TemplateStateFinalized) m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) @@ -330,10 +327,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData // Template exists, but the pool is not there - // it must have been removed. // It needs to be reinitialized. 
- initDBFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) - } - m.pool.InitHashPool(ctx, template.Database, initDBFunc) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestDB) // pool initalized, create one test db testDB, err = m.pool.ExtendPool(ctx, template.Database) @@ -342,6 +336,13 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData } + // before returning create a new test database in background + m.wg.Add(1) + go func(templ *templates.Template) { + defer m.wg.Done() + _ = m.createTestDatabaseFromTemplate(ctx, templ) + }(template) + if err != nil { return db.TestDatabase{}, err } @@ -454,6 +455,10 @@ func (m *Manager) createDatabase(ctx context.Context, dbName string, owner strin return nil } +func (m *Manager) recreateTestDB(ctx context.Context, testDB db.TestDatabase, templateName string) error { + return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) +} + func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { defer trace.StartRegion(ctx, "drop_db").End() @@ -489,11 +494,7 @@ func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template * return ErrInvalidTemplateState } - initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) - } - - return m.pool.AddTestDatabase(ctx, template.Database, initFunc) + return m.pool.AddTestDatabase(ctx, template.Database, m.recreateTestDB) } // Adds new test databases for a template, intended to be run asynchronously from other operations in a separate goroutine, using the manager's WaitGroup to synchronize for shutdown. 
From 914671b661da86ebc60b2af6d067c3f5d4f31a9a Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 7 Jul 2023 13:17:28 +0000 Subject: [PATCH 101/160] pool: reset not returned db --- pkg/pool/pool.go | 153 ++++++++++++++++++++++++++++++++---------- pkg/pool/pool_test.go | 1 - 2 files changed, 119 insertions(+), 35 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 0bf614c..601176c 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -5,12 +5,10 @@ import ( "errors" "fmt" "runtime/trace" - "sort" "sync" "time" "github.com/allaboutapps/integresql/pkg/db" - "github.com/allaboutapps/integresql/pkg/util" ) var ( @@ -77,6 +75,7 @@ type dbHashPool struct { dbs []existingDB ready chan int // ID of initalized DBs according to a template, ready to pick them up dirty chan int // ID of returned DBs, need to be recreated to reuse them + inUse chan int // ID of DBs that were given away and are currenly in use recreateDB recreateTestDBFunc templateDB db.Database @@ -164,24 +163,27 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. return } - givenTestDB := pool.dbs[index] + testDB := pool.dbs[index] // sanity check, should never happen - we got this index from 'ready' channel - if givenTestDB.state != dbStateReady { + if testDB.state != dbStateReady { err = ErrInvalidState return } - givenTestDB.state = dbStateInUse - pool.dbs[index] = givenTestDB + testDB.state = dbStateInUse + pool.dbs[index] = testDB - return givenTestDB.TestDatabase, nil + pool.inUse <- index + + return testDB.TestDatabase, nil // dbHashPool unlocked // ! } // AddTestDatabase adds a new test DB to the pool and creates it according to the template. // The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. -// If the pool size has already reached MAX, ErrPoolFull is returned. +// If the pool size has already reached MAX, ErrPoolFull is returned, unless ForceDBReturn flag is set to false. 
+// Then databases that were given away would get reset (if no DB connection is currently open) and marked as 'Ready'. func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { hash := templateDB.TemplateHash @@ -196,12 +198,19 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in pool = p.initHashPool(ctx, templateDB, initFunc) } + forceReturn := p.ForceDBReturn p.mutex.Unlock() // DBPool unlocked // ! newTestDB, err := pool.extend(ctx, dbStateReady, p.PoolConfig.TestDBNamePrefix) if err != nil { + if errors.Is(err, ErrPoolFull) && !forceReturn { + // we can try to reset test databases that are 'InUse' + _, err := pool.resetNotReturned(ctx, p.TestDBNamePrefix, false /* shouldKeepInUse */) + return err + } + return err } @@ -229,6 +238,7 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes return db.TestDatabase{}, ErrUnknownHash } + forceReturn := p.ForceDBReturn p.mutex.Unlock() // DBPool unlocked // ! @@ -236,9 +246,16 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // because we return it right away, we treat it as 'inUse' newTestDB, err := pool.extend(ctx, dbStateInUse, p.PoolConfig.TestDBNamePrefix) if err != nil { + if errors.Is(err, ErrPoolFull) && !forceReturn { + // we can try to reset test databases that are 'InUse' + return pool.resetNotReturned(ctx, p.TestDBNamePrefix, true /* shouldKeepInUse */) + } + return db.TestDatabase{}, err } + pool.inUse <- newTestDB.ID + return newTestDB, nil } @@ -271,7 +288,7 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // DBPool unlocked // ! - if id >= len(pool.dbs) { + if id < 0 || id >= len(pool.dbs) { return ErrInvalidIndex } @@ -292,6 +309,28 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // ! 
} +// ReturnCleanTestDatabase is used to return a DB that is currently 'InUse' to the pool, +// but has not been modified and is ready to be reused on next GET call. +// Therefore it's not added to 'dirty' channel and is reused as is. +func (p *DBPool) ReturnCleanTestDatabase(ctx context.Context, hash string, id int) error { + + // ! + // DBPool locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") + p.mutex.Lock() + reg.End() + pool := p.pools[hash] + + if pool == nil { + // no such pool + p.mutex.Unlock() + return ErrUnknownHash + } + p.mutex.Unlock() + + return pool.returnCleanDB(ctx, id) +} + // RemoveAllWithHash removes a pool with a given template hash. // All background workers belonging to this pool are stopped. func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc func(db.TestDatabase) error) error { @@ -345,6 +384,7 @@ func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDB dbs: make([]existingDB, 0, cfg.MaxPoolSize), ready: make(chan int, cfg.MaxPoolSize), dirty: make(chan int, cfg.MaxPoolSize), + inUse: make(chan int, cfg.MaxPoolSize), recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), templateDB: templateDB, numOfWorkers: cfg.NumOfWorkers, @@ -416,6 +456,33 @@ func (pool *dbHashPool) workerCleanUpReturnedDB() { } } +func (pool *dbHashPool) returnCleanDB(ctx context.Context, id int) error { + pool.Lock() + defer pool.Unlock() + + if id < 0 || id >= len(pool.dbs) { + return ErrInvalidIndex + } + + // check if db is in the correct state + testDB := pool.dbs[id] + if testDB.state == dbStateReady { + return nil + } + + // if not in use, it will be cleaned up by a worker + if testDB.state != dbStateInUse { + return ErrInvalidState + } + + testDB.state = dbStateReady + pool.dbs[id] = testDB + + return nil + // dbHashPool unlocked + // ! +} + func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string) (db.TestDatabase, error) { // ! 
// dbHashPool locked @@ -427,12 +494,6 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // get index of a next test DB - its ID index := len(pool.dbs) if index == cap(pool.dbs) { - - if !pool.forceDBReturn { - // if forceDBReturn is allowed, try it instead of returning error - return pool.unsafeRecycleInUseTestDB(ctx) - } - return db.TestDatabase{}, ErrPoolFull } @@ -463,35 +524,59 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // ! } -// unsafeRecycleInUseTestDB searches for a test DB that is in use and that can be dropped and recreated. -// WARNING: pool has to be already locked by a calling function! -func (pool *dbHashPool) unsafeRecycleInUseTestDB(ctx context.Context) (db.TestDatabase, error) { +// resetNotReturned recreates one DB that is 'InUse' and to which no db clients are connected (so it can be dropped). +// If shouldKeepInUse is set to true, the DB state remains 'InUse'. Otherwise, it is marked as 'Ready' +// and can be obtained again with GetTestDatabase request - in such case error is nil but returned db.TestDatabase is empty. +func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix string, shouldKeepInUse bool) (db.TestDatabase, error) { + timeout := 10 * time.Millisecond // arbitrary small timeout not to cause deadlock + var index int + select { + case <-time.After(timeout): + return db.TestDatabase{}, ErrPoolFull + case index = <-pool.inUse: + } + + // ! 
+ // dbHashPool locked + reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") + pool.Lock() + defer pool.Unlock() + reg.End() - dbInUse := util.NewSliceToSortByTime[int]() - for id := 0; id < len(pool.dbs); id++ { - testDB := pool.dbs[id] + // sanity check, should never happen + if index < 0 || index >= len(pool.dbs) { + return db.TestDatabase{}, ErrInvalidIndex + } - if pool.dbs[id].state == dbStateInUse { - dbInUse.Add(testDB.createdAt, testDB.ID) + testDB := pool.dbs[index] + if testDB.state == dbStateReady { + if shouldKeepInUse { + return db.TestDatabase{}, ErrInvalidState } + + return db.TestDatabase{}, nil } - sort.Sort(dbInUse) - for i := 0; i < len(dbInUse); i++ { - id := dbInUse[i].Data - testDB := pool.dbs[id] + if err := pool.recreateDB(ctx, &testDB); err != nil { + return db.TestDatabase{}, err + } - if err := pool.recreateDB(ctx, &testDB); err != nil { - // probably still in use, we will continue to search for another ready to be recreated DB - continue - } - pool.dbs[id] = testDB + if shouldKeepInUse { + testDB.state = dbStateInUse + pool.dbs[index] = testDB + pool.inUse <- index return testDB.TestDatabase, nil } - // we went through all the test DBs and none is ready to be recreated -> pool is full - return db.TestDatabase{}, ErrPoolFull + // if shouldKeepInUse is false, we can add this DB to the ready pool + testDB.state = dbStateReady + pool.dbs[index] = testDB + pool.ready <- index + + return db.TestDatabase{}, nil + // dbHashPool unlocked + // ! 
} func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error { diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index 3f0165b..f3d715b 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -182,7 +182,6 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { db, err := p.GetTestDatabase(ctx, hash, 3*time.Second) assert.NoError(t, err) assert.Equal(t, hash, db.TemplateHash) - time.Sleep(20 * time.Millisecond) t.Logf("returning %s %v\n", db.TemplateHash, db.ID) assert.NoError(t, p.ReturnTestDatabase(ctx, hash, db.ID)) } From ea2e6e608e1fdad2d9463c0e5a60ce3a22bfc413 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 12 Jul 2023 08:29:36 +0000 Subject: [PATCH 102/160] try recreate used test DB in a loop --- pkg/manager/manager.go | 18 ++++--- pkg/manager/manager_test.go | 4 ++ pkg/pool/pool.go | 99 ++++++++++++++++++++++++++----------- pkg/pool/pool_test.go | 3 +- 4 files changed, 86 insertions(+), 38 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 5dfd956..0a6728d 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -336,17 +336,21 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData } - // before returning create a new test database in background - m.wg.Add(1) - go func(templ *templates.Template) { - defer m.wg.Done() - _ = m.createTestDatabaseFromTemplate(ctx, templ) - }(template) - if err != nil { return db.TestDatabase{}, err } + if !m.config.TestDatabaseForceReturn { + // before returning create a new test database in background + m.wg.Add(1) + go func(templ *templates.Template) { + defer m.wg.Done() + if err := m.createTestDatabaseFromTemplate(ctx, templ); err != nil { + fmt.Printf("integresql: failed to create a new DB in background: %v\n", err) + } + }(template) + } + return testDB, nil } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 2e1fe30..fae36e3 100644 --- a/pkg/manager/manager_test.go +++ 
b/pkg/manager/manager_test.go @@ -615,6 +615,8 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 3 cfg.TestDatabaseMaxPoolSize = 3 + cfg.TestDatabaseForceReturn = true + cfg.TestDatabaseGetTimeout = 200 * time.Millisecond m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -730,6 +732,7 @@ func TestManagerReturnTestDatabase(t *testing.T) { cfg.TestDatabaseInitialPoolSize = 1 // can be extended, but should first reuse existing cfg.TestDatabaseMaxPoolSize = 3 + cfg.TestDatabaseForceReturn = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -910,6 +913,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() // there are no db added in background cfg.TestDatabaseInitialPoolSize = 0 + cfg.TestDatabaseForceReturn = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 601176c..c599806 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -173,7 +173,12 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. testDB.state = dbStateInUse pool.dbs[index] = testDB - pool.inUse <- index + select { + case pool.inUse <- index: + // sent to InUse without blocking + default: + // channel is full + } return testDB.TestDatabase, nil // dbHashPool unlocked @@ -244,7 +249,7 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // ! 
// because we return it right away, we treat it as 'inUse' - newTestDB, err := pool.extend(ctx, dbStateInUse, p.PoolConfig.TestDBNamePrefix) + testDB, err := pool.extend(ctx, dbStateInUse, p.PoolConfig.TestDBNamePrefix) if err != nil { if errors.Is(err, ErrPoolFull) && !forceReturn { // we can try to reset test databases that are 'InUse' @@ -254,9 +259,14 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes return db.TestDatabase{}, err } - pool.inUse <- newTestDB.ID + select { + case pool.inUse <- testDB.ID: + // sent to InUse without blocking + default: + // channel is full + } - return newTestDB, nil + return testDB, nil } // ReturnTestDatabase is used to return a DB that is currently 'InUse' to the pool. @@ -384,7 +394,7 @@ func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDB dbs: make([]existingDB, 0, cfg.MaxPoolSize), ready: make(chan int, cfg.MaxPoolSize), dirty: make(chan int, cfg.MaxPoolSize), - inUse: make(chan int, cfg.MaxPoolSize), + inUse: make(chan int, 3*cfg.MaxPoolSize), // here indexes can be duplicated recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), templateDB: templateDB, numOfWorkers: cfg.NumOfWorkers, @@ -528,43 +538,74 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // If shouldKeepInUse is set to true, the DB state remains 'InUse'. Otherwise, it is marked as 'Ready' // and can be obtained again with GetTestDatabase request - in such case error is nil but returned db.TestDatabase is empty. func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix string, shouldKeepInUse bool) (db.TestDatabase, error) { - timeout := 10 * time.Millisecond // arbitrary small timeout not to cause deadlock + var testDB existingDB var index int - select { - case <-time.After(timeout): - return db.TestDatabase{}, ErrPoolFull - case index = <-pool.inUse: - } + found := false - // ! 
- // dbHashPool locked - reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") - pool.Lock() - defer pool.Unlock() - reg.End() + // we want to search in loop for a InUse DB that could be reused + tryTimes := 3 + for i := 0; i < tryTimes; i++ { - // sanity check, should never happen - if index < 0 || index >= len(pool.dbs) { - return db.TestDatabase{}, ErrInvalidIndex - } + timeout := 100 * time.Millisecond // arbitrary small timeout not to cause deadlock - testDB := pool.dbs[index] - if testDB.state == dbStateReady { - if shouldKeepInUse { - return db.TestDatabase{}, ErrInvalidState + select { + case <-time.After(timeout): + return db.TestDatabase{}, ErrPoolFull + case index = <-pool.inUse: } - return db.TestDatabase{}, nil + // ! + // dbHashPool locked + reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") + pool.Lock() + reg.End() + + // sanity check, should never happen + if index < 0 || index >= len(pool.dbs) { + // if something is wrong with the received index, just return, don't try any other time (maybe RemoveAll was requested) + return db.TestDatabase{}, ErrInvalidIndex + } + + testDB = pool.dbs[index] + pool.Unlock() + + if testDB.state == dbStateReady { + // this DB is 'ready' already, we can skip it and search for a dirty one + continue + } + + if err := pool.recreateDB(ctx, &testDB); err != nil { + // this database remains 'InUse' + select { + case pool.inUse <- index: + // sent to InUse without blocking + default: + // channel is full + } + continue + } + + found = true + break } - if err := pool.recreateDB(ctx, &testDB); err != nil { - return db.TestDatabase{}, err + if !found { + return db.TestDatabase{}, ErrPoolFull } + pool.Lock() + defer pool.Unlock() + if shouldKeepInUse { testDB.state = dbStateInUse pool.dbs[index] = testDB - pool.inUse <- index + + select { + case pool.inUse <- index: + // sent to InUse without blocking + default: + // channel is full + } return testDB.TestDatabase, nil } diff --git a/pkg/pool/pool_test.go 
b/pkg/pool/pool_test.go index f3d715b..001f39e 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -330,7 +330,7 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { } initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - t.Log("(re)create ", testDB.Database, ", template name: ", templateName) + t.Log("(re)create ", testDB.Database.Config.Database) return nil } @@ -352,7 +352,6 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { forceExtend := func(seenIDMap *sync.Map) { newTestDB1, err := p.ExtendPool(ctx, templateDB1) assert.NoError(t, err) - assert.Equal(t, hash1, newTestDB1.TemplateHash) seenIDMap.Store(newTestDB1.ID, true) } From ecde004985d0c13306c27204da38084d326bc1af Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 12 Jul 2023 09:57:48 +0000 Subject: [PATCH 103/160] test manager without returning db --- pkg/manager/manager.go | 2 ++ pkg/manager/manager_test.go | 59 ++++++++++++++++++++++++++++++++++++- pkg/pool/pool.go | 2 +- 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 0a6728d..b377cc8 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -228,6 +228,8 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro return ErrManagerNotReady } + m.wg.Wait() + // first remove all DB with this hash if err := m.pool.RemoveAllWithHash(ctx, hash, func(testDB db.TestDatabase) error { return m.dropDatabase(ctx, testDB.Database.Config.Database) diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index fae36e3..618e1ef 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -13,6 +13,7 @@ import ( "github.com/allaboutapps/integresql/pkg/manager" "github.com/lib/pq" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestManagerConnect(t *testing.T) { @@ -652,7 +653,7 @@ func 
TestManagerGetTestDatabaseReusingIDs(t *testing.T) { assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) } -func TestManagerGetTestDatabaseExtendingPool(t *testing.T) { +func TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() @@ -661,6 +662,7 @@ func TestManagerGetTestDatabaseExtendingPool(t *testing.T) { // should extend up to 10 on demand cfg.TestDatabaseMaxPoolSize = 10 cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond + // force DB return cfg.TestDatabaseForceReturn = true m, _ := testManagerWithConfig(cfg) @@ -707,6 +709,61 @@ func TestManagerGetTestDatabaseExtendingPool(t *testing.T) { assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) } +func TestManagerGetTestDatabaseDontReturn(t *testing.T) { + + ctx := context.Background() + + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TestDatabaseInitialPoolSize = 5 + cfg.TestDatabaseMaxPoolSize = 5 + // enable reusing old not returned databases + cfg.TestDatabaseForceReturn = false + m, _ := testManagerWithConfig(cfg) + + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } + + defer disconnectManager(t, m) + + hash := "hashinghash" + + template, err := m.InitializeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) + } + + populateTemplateDB(t, template) + + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + + var wg sync.WaitGroup + for i := 0; i < cfg.TestDatabaseMaxPoolSize*5; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + testDB, err := m.GetTestDatabase(ctx, hash) + require.NoError(t, err) + db, err := sql.Open("postgres", testDB.Config.ConnectionString()) + assert.NoError(t, err) + + // keep an open DB connection for a while + time.Sleep(200 * time.Millisecond) + + // now disconnect + db.Close() + // don't return + }() + } + 
wg.Wait() + + // discard the template + assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) +} + func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { ctx := context.Background() diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index c599806..692d1ba 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -543,7 +543,7 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix strin found := false // we want to search in loop for a InUse DB that could be reused - tryTimes := 3 + tryTimes := 5 for i := 0; i < tryTimes; i++ { timeout := 100 * time.Millisecond // arbitrary small timeout not to cause deadlock From 11f5278b8c10e3df8322cb3c8655f701d6ad8b9c Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 12 Jul 2023 11:09:16 +0000 Subject: [PATCH 104/160] fix create test db context --- pkg/manager/manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index b377cc8..2e49ed2 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -347,7 +347,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData m.wg.Add(1) go func(templ *templates.Template) { defer m.wg.Done() - if err := m.createTestDatabaseFromTemplate(ctx, templ); err != nil { + if err := m.createTestDatabaseFromTemplate(m.connectionCtx, templ); err != nil { fmt.Printf("integresql: failed to create a new DB in background: %v\n", err) } }(template) From 95bc264cdced9b248b5f3f5306814a403f04dfbb Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 12 Jul 2023 13:44:53 +0000 Subject: [PATCH 105/160] increase template finalize timeout --- pkg/manager/manager_config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index face104..8fac160 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -58,7 +58,7 @@ func DefaultManagerConfigFromEnv() ManagerConfig { 
TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), - TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 2000)), + TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 20000)), TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", 3), TestDatabaseForceReturn: util.GetEnvAsBool("INTEGRESQL_TEST_DB_FORCE_RETURN", false), From 17d327e92aeee3760570ae49921f1d055fe30440 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 12 Jul 2023 14:32:28 +0000 Subject: [PATCH 106/160] restore previous meaning of dirty --- pkg/pool/pool.go | 60 ++++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 692d1ba..9499f82 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -22,9 +22,9 @@ var ( type dbState int // Indicates a current DB state. const ( - dbStateReady dbState = iota // Initialized according to a template and ready to be picked up. - dbStateInUse // Currently in use, can't be reused. - dbStateDirty // Returned to the pool, waiting for the cleaning. + dbStateReady dbState = iota // Initialized according to a template and ready to be picked up. + dbStateInUse // Currently in use, can't be reused. + dbStateWaitingForCleaning // Returned to the pool, waiting for the cleaning. ) const stopWorkerMessage int = -1 @@ -72,10 +72,10 @@ type existingDB struct { // dbHashPool holds a test DB pool for a certain hash. 
Each dbHashPool is running cleanup workers in background. type dbHashPool struct { - dbs []existingDB - ready chan int // ID of initalized DBs according to a template, ready to pick them up - dirty chan int // ID of returned DBs, need to be recreated to reuse them - inUse chan int // ID of DBs that were given away and are currenly in use + dbs []existingDB + ready chan int // ID of initalized DBs according to a template, ready to pick them up + waitingForCleaning chan int // ID of returned DBs, need to be recreated to reuse them + inUse chan int // ID of DBs that were given away and are currenly in use recreateDB recreateTestDBFunc templateDB db.Database @@ -113,7 +113,7 @@ func (p *DBPool) Stop() { defer p.mutex.Unlock() for _, pool := range p.pools { - close(pool.dirty) + close(pool.waitingForCleaning) pool.wg.Wait() } @@ -308,11 +308,11 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er return ErrInvalidState } - testDB.state = dbStateDirty + testDB.state = dbStateWaitingForCleaning pool.dbs[id] = testDB - // add it to dirty channel, to have it cleaned up by the worker - pool.dirty <- id + // add it to waitingForCleaning channel, to have it cleaned up by the worker + pool.waitingForCleaning <- id return nil // dbHashPool unlocked @@ -321,7 +321,7 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // ReturnCleanTestDatabase is used to return a DB that is currently 'InUse' to the pool, // but has not been modified and is ready to be reused on next GET call. -// Therefore it's not added to 'dirty' channel and is reused as is. +// Therefore it's not added to 'waitingForCleaning' channel and is reused as is. func (p *DBPool) ReturnCleanTestDatabase(ctx context.Context, hash string, id int) error { // ! 
@@ -391,14 +391,14 @@ func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { return &dbHashPool{ - dbs: make([]existingDB, 0, cfg.MaxPoolSize), - ready: make(chan int, cfg.MaxPoolSize), - dirty: make(chan int, cfg.MaxPoolSize), - inUse: make(chan int, 3*cfg.MaxPoolSize), // here indexes can be duplicated - recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), - templateDB: templateDB, - numOfWorkers: cfg.NumOfWorkers, - forceDBReturn: cfg.ForceDBReturn, + dbs: make([]existingDB, 0, cfg.MaxPoolSize), + ready: make(chan int, cfg.MaxPoolSize), + waitingForCleaning: make(chan int, cfg.MaxPoolSize), + inUse: make(chan int, 3*cfg.MaxPoolSize), // here indexes can be duplicated + recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), + templateDB: templateDB, + numOfWorkers: cfg.NumOfWorkers, + forceDBReturn: cfg.ForceDBReturn, } } @@ -412,12 +412,12 @@ func (pool *dbHashPool) enableWorker(numberOfWorkers int) { } } -// workerCleanUpReturnedDB reads 'dirty' channel and cleans up a test DB with the received index. +// workerCleanUpReturnedDB reads 'waitingForCleaning' channel and cleans up a test DB with the received index. // When the DB is recreated according to a template, its index goes to the 'ready' channel. 
func (pool *dbHashPool) workerCleanUpReturnedDB() { - for dirtyID := range pool.dirty { - if dirtyID == stopWorkerMessage { + for id := range pool.waitingForCleaning { + if id == stopWorkerMessage { break } @@ -427,16 +427,16 @@ func (pool *dbHashPool) workerCleanUpReturnedDB() { pool.RLock() regLock.End() - if dirtyID < 0 || dirtyID >= len(pool.dbs) { + if id < 0 || id >= len(pool.dbs) { // sanity check, should never happen pool.RUnlock() task.End() continue } - testDB := pool.dbs[dirtyID] + testDB := pool.dbs[id] pool.RUnlock() - if testDB.state != dbStateDirty { + if testDB.state != dbStateWaitingForCleaning { task.End() continue } @@ -455,7 +455,7 @@ func (pool *dbHashPool) workerCleanUpReturnedDB() { regLock.End() testDB.state = dbStateReady - pool.dbs[dirtyID] = testDB + pool.dbs[id] = testDB pool.Unlock() @@ -570,7 +570,7 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix strin pool.Unlock() if testDB.state == dbStateReady { - // this DB is 'ready' already, we can skip it and search for a dirty one + // this DB is 'ready' already, we can skip it and search for a waitingForCleaning one continue } @@ -625,7 +625,7 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error // stop the worker // we don't close here because if the remove operation fails, we want to be able to repeat it for i := 0; i < pool.numOfWorkers; i++ { - pool.dirty <- stopWorkerMessage + pool.waitingForCleaning <- stopWorkerMessage } pool.wg.Wait() @@ -653,7 +653,7 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error // close all only if removal of all succeeded pool.dbs = nil - close(pool.dirty) + close(pool.waitingForCleaning) return nil // dbHashPool unlocked From 84fd49ff0e2c4492a0f2572c9768ccdbc6afda1e Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 13 Jul 2023 06:34:26 +0000 Subject: [PATCH 107/160] fix data race --- pkg/manager/manager.go | 6 +++--- pkg/manager/manager_test.go | 5 ++++- 2 files 
changed, 7 insertions(+), 4 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 2e49ed2..6dc213f 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -345,12 +345,12 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData if !m.config.TestDatabaseForceReturn { // before returning create a new test database in background m.wg.Add(1) - go func(templ *templates.Template) { + go func(ctx context.Context, templ *templates.Template) { defer m.wg.Done() - if err := m.createTestDatabaseFromTemplate(m.connectionCtx, templ); err != nil { + if err := m.createTestDatabaseFromTemplate(ctx, templ); err != nil { fmt.Printf("integresql: failed to create a new DB in background: %v\n", err) } - }(template) + }(m.connectionCtx, template) } return testDB, nil diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 618e1ef..743fcdd 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -469,7 +469,10 @@ func TestManagerGetTestDatabaseConcurrently(t *testing.T) { func TestManagerDiscardTemplateDatabase(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TemplateFinalizeTimeout = 200 * time.Millisecond + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } From 46afe765e12258caa7baeed1e21fae93b64c226f Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 13 Jul 2023 06:48:01 +0000 Subject: [PATCH 108/160] rename inUse to dirty and dirty to waitingForCleaning --- pkg/pool/pool.go | 70 ++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 9499f82..5dc998b 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -23,7 +23,7 @@ type dbState int // Indicates a current DB state. 
const ( dbStateReady dbState = iota // Initialized according to a template and ready to be picked up. - dbStateInUse // Currently in use, can't be reused. + dbStateDirty // Currently in use. dbStateWaitingForCleaning // Returned to the pool, waiting for the cleaning. ) @@ -33,7 +33,7 @@ type PoolConfig struct { MaxPoolSize int TestDBNamePrefix string NumOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) - ForceDBReturn bool // Force returning test DB. If set to false, test databases that are 'InUse' can be recycled (in not actually used). + ForceDBReturn bool // Force returning test DB. If set to false, test databases that are 'dirty' can be recycled (in not actually used). } type DBPool struct { @@ -43,7 +43,7 @@ type DBPool struct { mutex sync.RWMutex } -// forceDBReturn set to false will allow reusing test databases that are marked as 'InUse'. +// forceDBReturn set to false will allow reusing test databases that are marked as 'dirty'. // Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. func NewDBPool(cfg PoolConfig) *DBPool { return &DBPool{ @@ -75,7 +75,7 @@ type dbHashPool struct { dbs []existingDB ready chan int // ID of initalized DBs according to a template, ready to pick them up waitingForCleaning chan int // ID of returned DBs, need to be recreated to reuse them - inUse chan int // ID of DBs that were given away and are currenly in use + dirty chan int // ID of DBs that were given away and are currenly in use recreateDB recreateTestDBFunc templateDB db.Database @@ -121,7 +121,7 @@ func (p *DBPool) Stop() { // GetTestDatabase picks up a ready to use test DB. It waits the given timeout until a DB is available. // If there is no DB ready and time elapses, ErrTimeout is returned. -// Otherwise, the obtained test DB is marked as 'InUse' and can be reused only if returned to the pool. 
+// Otherwise, the obtained test DB is marked as 'dirty' and can be reused only if returned to the pool. func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { // ! @@ -170,12 +170,12 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. return } - testDB.state = dbStateInUse + testDB.state = dbStateDirty pool.dbs[index] = testDB select { - case pool.inUse <- index: - // sent to InUse without blocking + case pool.dirty <- index: + // sent to dirty without blocking default: // channel is full } @@ -211,8 +211,8 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in newTestDB, err := pool.extend(ctx, dbStateReady, p.PoolConfig.TestDBNamePrefix) if err != nil { if errors.Is(err, ErrPoolFull) && !forceReturn { - // we can try to reset test databases that are 'InUse' - _, err := pool.resetNotReturned(ctx, p.TestDBNamePrefix, false /* shouldKeepInUse */) + // we can try to reset test databases that are 'dirty' + _, err := pool.resetNotReturned(ctx, p.TestDBNamePrefix, false /* shouldKeepDirty */) return err } @@ -248,20 +248,20 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // DBPool unlocked // ! 
- // because we return it right away, we treat it as 'inUse' - testDB, err := pool.extend(ctx, dbStateInUse, p.PoolConfig.TestDBNamePrefix) + // because we return it right away, we treat it as 'dirty' + testDB, err := pool.extend(ctx, dbStateDirty, p.PoolConfig.TestDBNamePrefix) if err != nil { if errors.Is(err, ErrPoolFull) && !forceReturn { - // we can try to reset test databases that are 'InUse' - return pool.resetNotReturned(ctx, p.TestDBNamePrefix, true /* shouldKeepInUse */) + // we can try to reset test databases that are 'dirty' + return pool.resetNotReturned(ctx, p.TestDBNamePrefix, true /* shouldKeepDirty */) } return db.TestDatabase{}, err } select { - case pool.inUse <- testDB.ID: - // sent to InUse without blocking + case pool.dirty <- testDB.ID: + // sent to dirty without blocking default: // channel is full } @@ -269,9 +269,9 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes return testDB, nil } -// ReturnTestDatabase is used to return a DB that is currently 'InUse' to the pool. +// ReturnTestDatabase is used to return a DB that is currently 'dirty' to the pool. // After successful return, the test DB is cleaned up in the background by a worker. -// If the test DB is in a different state than 'InUse', ErrInvalidState is returned. +// If the test DB is in a different state than 'dirty', ErrInvalidState is returned. func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { // ! @@ -304,7 +304,7 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // check if db is in the correct state testDB := pool.dbs[id] - if testDB.state != dbStateInUse { + if testDB.state != dbStateDirty { return ErrInvalidState } @@ -319,7 +319,7 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // ! 
} -// ReturnCleanTestDatabase is used to return a DB that is currently 'InUse' to the pool, +// ReturnCleanTestDatabase is used to return a DB that is currently 'dirty' to the pool, // but has not been modified and is ready to be reused on next GET call. // Therefore it's not added to 'waitingForCleaning' channel and is reused as is. func (p *DBPool) ReturnCleanTestDatabase(ctx context.Context, hash string, id int) error { @@ -394,7 +394,7 @@ func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDB dbs: make([]existingDB, 0, cfg.MaxPoolSize), ready: make(chan int, cfg.MaxPoolSize), waitingForCleaning: make(chan int, cfg.MaxPoolSize), - inUse: make(chan int, 3*cfg.MaxPoolSize), // here indexes can be duplicated + dirty: make(chan int, 3*cfg.MaxPoolSize), // here indexes can be duplicated recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), templateDB: templateDB, numOfWorkers: cfg.NumOfWorkers, @@ -481,7 +481,7 @@ func (pool *dbHashPool) returnCleanDB(ctx context.Context, id int) error { } // if not in use, it will be cleaned up by a worker - if testDB.state != dbStateInUse { + if testDB.state != dbStateDirty { return ErrInvalidState } @@ -534,15 +534,15 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // ! } -// resetNotReturned recreates one DB that is 'InUse' and to which no db clients are connected (so it can be dropped). -// If shouldKeepInUse is set to true, the DB state remains 'InUse'. Otherwise, it is marked as 'Ready' +// resetNotReturned recreates one DB that is 'dirty' and to which no db clients are connected (so it can be dropped). +// If shouldKeepDirty is set to true, the DB state remains 'dirty'. Otherwise, it is marked as 'Ready' // and can be obtained again with GetTestDatabase request - in such case error is nil but returned db.TestDatabase is empty. 
-func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix string, shouldKeepInUse bool) (db.TestDatabase, error) { +func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix string, shouldKeepDirty bool) (db.TestDatabase, error) { var testDB existingDB var index int found := false - // we want to search in loop for a InUse DB that could be reused + // we want to search in loop for a dirty DB that could be reused tryTimes := 5 for i := 0; i < tryTimes; i++ { @@ -551,7 +551,7 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix strin select { case <-time.After(timeout): return db.TestDatabase{}, ErrPoolFull - case index = <-pool.inUse: + case index = <-pool.dirty: } // ! @@ -575,10 +575,10 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix strin } if err := pool.recreateDB(ctx, &testDB); err != nil { - // this database remains 'InUse' + // this database remains 'dirty' select { - case pool.inUse <- index: - // sent to InUse without blocking + case pool.dirty <- index: + // sent to dirty without blocking default: // channel is full } @@ -596,13 +596,13 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix strin pool.Lock() defer pool.Unlock() - if shouldKeepInUse { - testDB.state = dbStateInUse + if shouldKeepDirty { + testDB.state = dbStateDirty pool.dbs[index] = testDB select { - case pool.inUse <- index: - // sent to InUse without blocking + case pool.dirty <- index: + // sent to dirty without blocking default: // channel is full } @@ -610,7 +610,7 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix strin return testDB.TestDatabase, nil } - // if shouldKeepInUse is false, we can add this DB to the ready pool + // if shouldKeepDirty is false, we can add this DB to the ready pool testDB.state = dbStateReady pool.dbs[index] = testDB pool.ready <- index From 8f75afada224d57f6ef15c8f4d54f80d552bc288 Mon Sep 17 00:00:00 2001 From: 
anjankow Date: Thu, 13 Jul 2023 09:13:40 +0000 Subject: [PATCH 109/160] add restore endpoint --- internal/api/templates/routes.go | 2 + internal/api/templates/templates.go | 27 ++++++ pkg/manager/manager.go | 80 +++++++++++++----- pkg/manager/manager_test.go | 124 ++++++++++++++++++++-------- pkg/pool/pool.go | 16 ++-- pkg/pool/pool_test.go | 94 +++++++++++++++++++++ 6 files changed, 283 insertions(+), 60 deletions(-) diff --git a/internal/api/templates/routes.go b/internal/api/templates/routes.go index 0e66c99..77e4d1f 100644 --- a/internal/api/templates/routes.go +++ b/internal/api/templates/routes.go @@ -10,4 +10,6 @@ func InitRoutes(s *api.Server) { g.DELETE("/:hash", deleteDiscardTemplate(s)) g.GET("/:hash/tests", getTestDatabase(s)) g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s)) + g.POST("/:hash/tests/:id/restore", postRestoreTestDatabase(s)) + } diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index cf4fc96..9f3a376 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -150,3 +150,30 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return c.NoContent(http.StatusNoContent) } } + +func postRestoreTestDatabase(s *api.Server) echo.HandlerFunc { + return func(c echo.Context) error { + hash := c.Param("hash") + id, err := strconv.Atoi(c.Param("id")) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID") + } + + if err := s.Manager.RestoreTestDatabase(c.Request().Context(), hash, id); err != nil { + switch err { + case manager.ErrManagerNotReady: + return echo.ErrServiceUnavailable + case manager.ErrTemplateNotFound: + return echo.NewHTTPError(http.StatusNotFound, "template not found") + case manager.ErrTestNotFound: + return echo.NewHTTPError(http.StatusNotFound, "test database not found") + case manager.ErrTestDBInUse: + return echo.NewHTTPError(http.StatusLocked, manager.ErrTestDBInUse.Error()) + default: + return 
echo.NewHTTPError(http.StatusInternalServerError, err.Error()) + } + } + + return c.NoContent(http.StatusNoContent) + } +} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 6dc213f..8e577d8 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -356,6 +356,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return testDB, nil } +// ReturnTestDatabase returns an unchanged test DB to the pool, allowing for reuse without cleaning. func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error { ctx, task := trace.NewTask(ctx, "return_test_db") defer task.End() @@ -364,21 +365,20 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e return ErrManagerNotReady } - // check if the template exists and is 'ready' + // check if the template exists and is finalized template, found := m.templates.Get(ctx, hash) - if found { - if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != - templates.TemplateStateFinalized { + if !found { + return m.dropDatabaseWithID(ctx, hash, id) + } - return ErrInvalidTemplateState - } + if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != + templates.TemplateStateFinalized { - // template is ready, we can return the testDB to the pool - err := m.pool.ReturnTestDatabase(ctx, hash, id) - if err == nil { - return nil - } + return ErrInvalidTemplateState + } + // template is ready, we can return unchanged testDB to the pool + if err := m.pool.ReturnTestDatabase(ctx, hash, id); err != nil { if !(errors.Is(err, pool.ErrInvalidIndex) || errors.Is(err, pool.ErrUnknownHash)) { // other error is an internal error @@ -386,20 +386,48 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e } // db is not tracked in the pool - // try to drop it if exists below + // try to drop it if exists + return m.dropDatabaseWithID(ctx, hash, id) } - dbName := m.pool.MakeDBName(hash, id) - 
exists, err := m.checkDatabaseExists(ctx, dbName) - if err != nil { - return err + return nil +} + +// RestoreTestDatabase recreates the test DB according to the template and returns it back to the pool. +func (m *Manager) RestoreTestDatabase(ctx context.Context, hash string, id int) error { + ctx, task := trace.NewTask(ctx, "restore_test_db") + defer task.End() + + if !m.Ready() { + return ErrManagerNotReady } - if !exists { - return ErrTestNotFound + // check if the template exists and is finalized + template, found := m.templates.Get(ctx, hash) + if !found { + return m.dropDatabaseWithID(ctx, hash, id) } - return m.dropDatabase(ctx, dbName) + if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != + templates.TemplateStateFinalized { + + return ErrInvalidTemplateState + } + + // template is ready, we can returb the testDB to the pool and have it cleaned up + if err := m.pool.RestoreTestDatabase(ctx, hash, id); err != nil { + if !(errors.Is(err, pool.ErrInvalidIndex) || + errors.Is(err, pool.ErrUnknownHash)) { + // other error is an internal error + return err + } + + // db is not tracked in the pool + // try to drop it if exists + return m.dropDatabaseWithID(ctx, hash, id) + } + + return nil } func (m *Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) error { @@ -437,6 +465,20 @@ func (m *Manager) ResetAllTracking(ctx context.Context) error { return nil } +func (m *Manager) dropDatabaseWithID(ctx context.Context, hash string, id int) error { + dbName := m.pool.MakeDBName(hash, id) + exists, err := m.checkDatabaseExists(ctx, dbName) + if err != nil { + return err + } + + if !exists { + return ErrTestNotFound + } + + return m.dropDatabase(ctx, dbName) +} + func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, error) { var exists bool if err := m.db.QueryRowContext(ctx, "SELECT 1 AS exists FROM pg_database WHERE datname = $1", dbName).Scan(&exists); err != nil { diff --git a/pkg/manager/manager_test.go 
b/pkg/manager/manager_test.go index 743fcdd..21b7b6a 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -11,6 +11,7 @@ import ( "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/manager" + "github.com/allaboutapps/integresql/pkg/pool" "github.com/lib/pq" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -537,7 +538,10 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { ctx := context.Background() - m := testManagerFromEnv() + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TemplateFinalizeTimeout = 200 * time.Millisecond + m, _ := testManagerWithConfig(cfg) + if err := m.Initialize(ctx); err != nil { t.Fatalf("initializing manager failed: %v", err) } @@ -784,54 +788,108 @@ func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { } } -func TestManagerReturnTestDatabase(t *testing.T) { +func TestManagerReturnRestoreTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - // there is just 1 database initially - cfg.TestDatabaseInitialPoolSize = 1 - // can be extended, but should first reuse existing - cfg.TestDatabaseMaxPoolSize = 3 + cfg.TestDatabaseInitialPoolSize = 10 + cfg.NumOfCleaningWorkers = 2 + cfg.TestDatabaseMaxPoolSize = 10 cfg.TestDatabaseForceReturn = true - m, _ := testManagerWithConfig(cfg) + cfg.TestDatabaseGetTimeout = 200 * time.Millisecond - if err := m.Initialize(ctx); err != nil { - t.Fatalf("initializing manager failed: %v", err) + tests := []struct { + name string + giveBackFunc func(m *manager.Manager, ctx context.Context, hash string, id int) error + resultCheck func(row *sql.Row, id int) + }{ + { + name: "Restore", + giveBackFunc: func(m *manager.Manager, ctx context.Context, hash string, id int) error { + return m.RestoreTestDatabase(ctx, hash, id) + }, + resultCheck: func(row *sql.Row, 
id int) { + assert.NoError(t, row.Err()) + var name string + assert.ErrorIs(t, row.Scan(&name), sql.ErrNoRows, id) + }, + }, + { + name: "Return", + giveBackFunc: func(m *manager.Manager, ctx context.Context, hash string, id int) error { + return m.ReturnTestDatabase(ctx, hash, id) + }, + resultCheck: func(row *sql.Row, id int) { + assert.NoError(t, row.Err(), id) + var name string + assert.NoError(t, row.Scan(&name), id) + assert.Equal(t, "Snufkin", name) + }, + }, } - defer disconnectManager(t, m) + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { - hash := "hashinghash" + m, _ := testManagerWithConfig(cfg) - template, err := m.InitializeTemplateDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to initialize template database: %v", err) - } + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } - populateTemplateDB(t, template) + defer disconnectManager(t, m) - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } + hash := "hashinghash" - test, err := m.GetTestDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to get test database: %v", err) - } + template, err := m.InitializeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) + } - if err := m.ReturnTestDatabase(ctx, hash, test.ID); err != nil { - t.Fatalf("failed to return test database: %v", err) - } + populateTemplateDB(t, template) - originalID := test.ID + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } - test, err = m.GetTestDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to get additional test database: %v", err) - } + for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { + testDB, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) - if test.ID != originalID { - t.Fatalf("failed to reuse returned 
test database, got ID %d, want ID %d", test.ID, originalID) + // open the connection and modify the test DB + db, err := sql.Open("postgres", testDB.Config.ConnectionString()) + require.NoError(t, err) + require.NoError(t, db.PingContext(ctx)) + + _, err = db.ExecContext(ctx, `INSERT INTO pilots (id, "name", created_at, updated_at) VALUES ('777a1a87-5ef7-4309-8814-0f1054751177', 'Snufkin', '2023-07-13 09:44:00.548', '2023-07-13 09:44:00.548')`) + assert.NoError(t, err, testDB.ID) + db.Close() + } + + _, err = m.GetTestDatabase(ctx, hash) + assert.ErrorIs(t, err, pool.ErrPoolFull) + + // restore or return test database + for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { + assert.NoError(t, tt.giveBackFunc(m, ctx, hash, i), i) + } + + for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { + testDB, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + + // assert that test db can be get again + // and that it has been cleaned up + db, err := sql.Open("postgres", testDB.Config.ConnectionString()) + require.NoError(t, err) + require.NoError(t, db.PingContext(ctx)) + + row := db.QueryRowContext(ctx, "SELECT name FROM pilots WHERE id = '777a1a87-5ef7-4309-8814-0f1054751177'") + tt.resultCheck(row, testDB.ID) + db.Close() + } + }) } } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 5dc998b..4032078 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -269,10 +269,10 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes return testDB, nil } -// ReturnTestDatabase is used to return a DB that is currently 'dirty' to the pool. -// After successful return, the test DB is cleaned up in the background by a worker. +// RestoreTestDatabase recreates the given test DB and returns it back to the pool. +// To have it recreated, it is added to 'waitingForCleaning' channel. // If the test DB is in a different state than 'dirty', ErrInvalidState is returned. 
-func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { +func (p *DBPool) RestoreTestDatabase(ctx context.Context, hash string, id int) error { // ! // DBPool locked @@ -319,11 +319,9 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er // ! } -// ReturnCleanTestDatabase is used to return a DB that is currently 'dirty' to the pool, -// but has not been modified and is ready to be reused on next GET call. -// Therefore it's not added to 'waitingForCleaning' channel and is reused as is. -func (p *DBPool) ReturnCleanTestDatabase(ctx context.Context, hash string, id int) error { - +// ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). +// If the test DB is in a different state than 'dirty', ErrInvalidState is returned. +func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { // ! // DBPool locked reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") @@ -488,6 +486,8 @@ func (pool *dbHashPool) returnCleanDB(ctx context.Context, id int) error { testDB.state = dbStateReady pool.dbs[id] = testDB + pool.ready <- id + return nil // dbHashPool unlocked // ! 
diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index 001f39e..a7f2dd6 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -376,3 +376,97 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { p.Stop() } + +func TestPoolReturnTestDatabase(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + templateDB1 := db.Database{ + TemplateHash: hash1, + Config: db.DatabaseConfig{ + Database: "h1_template", + }, + } + + recreateTimesMap := sync.Map{} + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + times, existing := recreateTimesMap.LoadOrStore(testDB.ID, 1) + if existing { + recreateTimesMap.Store(testDB.ID, times.(int)+1) + } + + return nil + } + + cfg := pool.PoolConfig{ + MaxPoolSize: 40, + NumOfWorkers: 3, + ForceDBReturn: true, + } + p := pool.NewDBPool(cfg) + p.InitHashPool(ctx, templateDB1, initFunc) + + for i := 0; i < cfg.MaxPoolSize; i++ { + testDB, err := p.ExtendPool(ctx, templateDB1) + assert.NoError(t, err) + // return - don't recreate, just bring back directly to the pool + assert.NoError(t, p.ReturnTestDatabase(ctx, hash1, testDB.ID)) + } + + for id := 0; id < cfg.MaxPoolSize; id++ { + recreatedTimes, ok := recreateTimesMap.Load(id) + assert.True(t, ok) + assert.Equal(t, 1, recreatedTimes) // just once to initialize it + } + + p.Stop() +} + +func TestPoolRestoreTestDatabase(t *testing.T) { + t.Parallel() + ctx := context.Background() + + hash1 := "h1" + templateDB1 := db.Database{ + TemplateHash: hash1, + Config: db.DatabaseConfig{ + Database: "h1_template", + }, + } + + recreateTimesMap := sync.Map{} + initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { + times, existing := recreateTimesMap.LoadOrStore(testDB.ID, 1) + if existing { + recreateTimesMap.Store(testDB.ID, times.(int)+1) + } + + return nil + } + + cfg := pool.PoolConfig{ + MaxPoolSize: 40, + NumOfWorkers: 3, + ForceDBReturn: true, + } + p := 
pool.NewDBPool(cfg) + p.InitHashPool(ctx, templateDB1, initFunc) + + for i := 0; i < cfg.MaxPoolSize; i++ { + testDB, err := p.ExtendPool(ctx, templateDB1) + assert.NoError(t, err) + // restore - add for cleaning + assert.NoError(t, p.RestoreTestDatabase(ctx, hash1, testDB.ID)) + } + + time.Sleep(100 * time.Millisecond) // wait a tiny bit to have all DB cleaned up + + for id := 0; id < cfg.MaxPoolSize; id++ { + recreatedTimes, ok := recreateTimesMap.Load(id) + assert.True(t, ok) + assert.Equal(t, 2, recreatedTimes) // first time to initialize it, second to clean it + } + + p.Stop() +} From 373794c5724f07fc86fb8654dee51188a1478528 Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 13 Jul 2023 09:23:19 +0000 Subject: [PATCH 110/160] don't return error when returning db that is ready --- pkg/pool/pool.go | 58 +++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 4032078..9cff474 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -304,6 +304,10 @@ func (p *DBPool) RestoreTestDatabase(ctx context.Context, hash string, id int) e // check if db is in the correct state testDB := pool.dbs[id] + if testDB.state == dbStateReady { + return nil + } + if testDB.state != dbStateDirty { return ErrInvalidState } @@ -336,7 +340,30 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er } p.mutex.Unlock() - return pool.returnCleanDB(ctx, id) + pool.Lock() + defer pool.Unlock() + + if id < 0 || id >= len(pool.dbs) { + return ErrInvalidIndex + } + + // check if db is in the correct state + testDB := pool.dbs[id] + if testDB.state == dbStateReady { + return nil + } + + // if not in use, it will be cleaned up by a worker + if testDB.state != dbStateDirty { + return ErrInvalidState + } + + testDB.state = dbStateReady + pool.dbs[id] = testDB + + pool.ready <- id + + return nil } // RemoveAllWithHash removes a pool with a given template hash. 
@@ -464,35 +491,6 @@ func (pool *dbHashPool) workerCleanUpReturnedDB() { } } -func (pool *dbHashPool) returnCleanDB(ctx context.Context, id int) error { - pool.Lock() - defer pool.Unlock() - - if id < 0 || id >= len(pool.dbs) { - return ErrInvalidIndex - } - - // check if db is in the correct state - testDB := pool.dbs[id] - if testDB.state == dbStateReady { - return nil - } - - // if not in use, it will be cleaned up by a worker - if testDB.state != dbStateDirty { - return ErrInvalidState - } - - testDB.state = dbStateReady - pool.dbs[id] = testDB - - pool.ready <- id - - return nil - // dbHashPool unlocked - // ! -} - func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string) (db.TestDatabase, error) { // ! // dbHashPool locked From a763dec946a28f571bf4396c0bd2c7025110e8fb Mon Sep 17 00:00:00 2001 From: anjankow Date: Thu, 13 Jul 2023 09:31:01 +0000 Subject: [PATCH 111/160] add POST /unlock endpint --- internal/api/templates/routes.go | 4 +++- internal/api/templates/templates.go | 16 ++++++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/internal/api/templates/routes.go b/internal/api/templates/routes.go index 77e4d1f..8ba9886 100644 --- a/internal/api/templates/routes.go +++ b/internal/api/templates/routes.go @@ -9,7 +9,9 @@ func InitRoutes(s *api.Server) { g.PUT("/:hash", putFinalizeTemplate(s)) g.DELETE("/:hash", deleteDiscardTemplate(s)) g.GET("/:hash/tests", getTestDatabase(s)) - g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s)) + g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s)) // deprecated, use POST /unlock instead + g.POST("/:hash/tests/:id/restore", postRestoreTestDatabase(s)) + g.POST("/:hash/tests/:id/unlock", postUnlockTestDatabase(s)) } diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 9f3a376..1349242 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -122,6 +122,10 @@ func getTestDatabase(s 
*api.Server) echo.HandlerFunc { } func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { + return postUnlockTestDatabase(s) +} + +func postRestoreTestDatabase(s *api.Server) echo.HandlerFunc { return func(c echo.Context) error { hash := c.Param("hash") id, err := strconv.Atoi(c.Param("id")) @@ -129,10 +133,7 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID") } - ctx, cancel := context.WithTimeout(c.Request().Context(), 10*time.Second) - defer cancel() - - if err := s.Manager.ReturnTestDatabase(ctx, hash, id); err != nil { + if err := s.Manager.RestoreTestDatabase(c.Request().Context(), hash, id); err != nil { switch err { case manager.ErrManagerNotReady: return echo.ErrServiceUnavailable @@ -151,7 +152,7 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { } } -func postRestoreTestDatabase(s *api.Server) echo.HandlerFunc { +func postUnlockTestDatabase(s *api.Server) echo.HandlerFunc { return func(c echo.Context) error { hash := c.Param("hash") id, err := strconv.Atoi(c.Param("id")) @@ -159,7 +160,10 @@ func postRestoreTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID") } - if err := s.Manager.RestoreTestDatabase(c.Request().Context(), hash, id); err != nil { + ctx, cancel := context.WithTimeout(c.Request().Context(), 10*time.Second) + defer cancel() + + if err := s.Manager.ReturnTestDatabase(ctx, hash, id); err != nil { switch err { case manager.ErrManagerNotReady: return echo.ErrServiceUnavailable From e9fd89813b207addbbc376e62409d6dcc05cad5a Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 05:51:59 +0000 Subject: [PATCH 112/160] slit dbHashPool and PoolCollection file --- pkg/manager/manager.go | 4 +- pkg/pool/pool.go | 375 ----------------- pkg/pool/pool_collection.go | 387 ++++++++++++++++++ .../{pool_test.go => pool_collection_test.go} | 16 +- 4 files changed, 397 
insertions(+), 385 deletions(-) create mode 100644 pkg/pool/pool_collection.go rename pkg/pool/{pool_test.go => pool_collection_test.go} (97%) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 8e577d8..4ab5377 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -32,7 +32,7 @@ type Manager struct { wg sync.WaitGroup templates *templates.Collection - pool *pool.DBPool + pool *pool.PoolCollection connectionCtx context.Context // DB connection context used for adding initial DBs in background cancelConnectionCtx func() // Cancel function for DB connection context @@ -53,7 +53,7 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { db: nil, wg: sync.WaitGroup{}, templates: templates.NewCollection(), - pool: pool.NewDBPool( + pool: pool.NewPoolCollection( pool.PoolConfig{ MaxPoolSize: config.TestDatabaseMaxPoolSize, TestDBNamePrefix: testDBPrefix, diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 9cff474..701f8a8 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -29,41 +29,6 @@ const ( const stopWorkerMessage int = -1 -type PoolConfig struct { - MaxPoolSize int - TestDBNamePrefix string - NumOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) - ForceDBReturn bool // Force returning test DB. If set to false, test databases that are 'dirty' can be recycled (in not actually used). -} - -type DBPool struct { - PoolConfig - - pools map[string]*dbHashPool // map[hash] - mutex sync.RWMutex -} - -// forceDBReturn set to false will allow reusing test databases that are marked as 'dirty'. -// Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. -func NewDBPool(cfg PoolConfig) *DBPool { - return &DBPool{ - pools: make(map[string]*dbHashPool), - PoolConfig: cfg, - } -} - -// RecreateDBFunc callback executed when a pool is extended or the DB cleaned up by a worker. 
-type RecreateDBFunc func(ctx context.Context, testDB db.TestDatabase, templateName string) error - -func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc RecreateDBFunc) recreateTestDBFunc { - return func(ctx context.Context, testDBWrapper *existingDB) error { - testDBWrapper.createdAt = time.Now() - return userRecreateFunc(ctx, testDBWrapper.TestDatabase, templateName) - } -} - -type recreateTestDBFunc func(context.Context, *existingDB) error - type existingDB struct { state dbState createdAt time.Time @@ -87,333 +52,6 @@ type dbHashPool struct { wg sync.WaitGroup } -// InitHashPool creates a new pool with a given template hash and starts the cleanup workers. -func (p *DBPool) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { - p.mutex.Lock() - defer p.mutex.Unlock() - - _ = p.initHashPool(ctx, templateDB, initDBFunc) -} - -func (p *DBPool) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { - // create a new dbHashPool - pool := newDBHashPool(p.PoolConfig, templateDB, initDBFunc) - // and start the cleaning worker - pool.enableWorker(p.NumOfWorkers) - - // pool is ready - p.pools[pool.templateDB.TemplateHash] = pool - - return pool -} - -// Stop is used to stop all background workers -func (p *DBPool) Stop() { - p.mutex.Lock() - defer p.mutex.Unlock() - - for _, pool := range p.pools { - close(pool.waitingForCleaning) - pool.wg.Wait() - } - -} - -// GetTestDatabase picks up a ready to use test DB. It waits the given timeout until a DB is available. -// If there is no DB ready and time elapses, ErrTimeout is returned. -// Otherwise, the obtained test DB is marked as 'dirty' and can be reused only if returned to the pool. -func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { - - // ! 
- // DBPool locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.RLock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - // no such pool - p.mutex.RUnlock() - err = ErrUnknownHash - return - } - - p.mutex.RUnlock() - // DBPool unlocked - // ! - - var index int - select { - case <-time.After(timeout): - err = ErrTimeout - return - case index = <-pool.ready: - } - - // ! - // dbHashPool locked - reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") - pool.Lock() - defer pool.Unlock() - reg.End() - - // sanity check, should never happen - if index < 0 || index >= len(pool.dbs) { - err = ErrInvalidIndex - return - } - - testDB := pool.dbs[index] - // sanity check, should never happen - we got this index from 'ready' channel - if testDB.state != dbStateReady { - err = ErrInvalidState - return - } - - testDB.state = dbStateDirty - pool.dbs[index] = testDB - - select { - case pool.dirty <- index: - // sent to dirty without blocking - default: - // channel is full - } - - return testDB.TestDatabase, nil - // dbHashPool unlocked - // ! -} - -// AddTestDatabase adds a new test DB to the pool and creates it according to the template. -// The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. -// If the pool size has already reached MAX, ErrPoolFull is returned, unless ForceDBReturn flag is set to false. -// Then databases that were given away would get reset (if no DB connection is currently open) and marked as 'Ready'. -func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { - hash := templateDB.TemplateHash - - // ! - // DBPool locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.Lock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - pool = p.initHashPool(ctx, templateDB, initFunc) - } - - forceReturn := p.ForceDBReturn - p.mutex.Unlock() - // DBPool unlocked - // ! 
- - newTestDB, err := pool.extend(ctx, dbStateReady, p.PoolConfig.TestDBNamePrefix) - if err != nil { - if errors.Is(err, ErrPoolFull) && !forceReturn { - // we can try to reset test databases that are 'dirty' - _, err := pool.resetNotReturned(ctx, p.TestDBNamePrefix, false /* shouldKeepDirty */) - return err - } - - return err - } - - // and add its index to 'ready' - pool.ready <- newTestDB.ID - - return nil -} - -// AddTestDatabase adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. -// The new test DB is marked as 'IsUse' and won't be picked up with GetTestDatabase, until it's returned to the pool. -func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { - hash := templateDB.TemplateHash - - // ! - // DBPool locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.Lock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - // meant to be only for already initialized pools - p.mutex.Unlock() - return db.TestDatabase{}, ErrUnknownHash - } - - forceReturn := p.ForceDBReturn - p.mutex.Unlock() - // DBPool unlocked - // ! - - // because we return it right away, we treat it as 'dirty' - testDB, err := pool.extend(ctx, dbStateDirty, p.PoolConfig.TestDBNamePrefix) - if err != nil { - if errors.Is(err, ErrPoolFull) && !forceReturn { - // we can try to reset test databases that are 'dirty' - return pool.resetNotReturned(ctx, p.TestDBNamePrefix, true /* shouldKeepDirty */) - } - - return db.TestDatabase{}, err - } - - select { - case pool.dirty <- testDB.ID: - // sent to dirty without blocking - default: - // channel is full - } - - return testDB, nil -} - -// RestoreTestDatabase recreates the given test DB and returns it back to the pool. -// To have it recreated, it is added to 'waitingForCleaning' channel. -// If the test DB is in a different state than 'dirty', ErrInvalidState is returned. 
-func (p *DBPool) RestoreTestDatabase(ctx context.Context, hash string, id int) error { - - // ! - // DBPool locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.Lock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - // no such pool - p.mutex.Unlock() - return ErrUnknownHash - } - - // ! - // dbHashPool locked - reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") - pool.Lock() - defer pool.Unlock() - reg.End() - - p.mutex.Unlock() - // DBPool unlocked - // ! - - if id < 0 || id >= len(pool.dbs) { - return ErrInvalidIndex - } - - // check if db is in the correct state - testDB := pool.dbs[id] - if testDB.state == dbStateReady { - return nil - } - - if testDB.state != dbStateDirty { - return ErrInvalidState - } - - testDB.state = dbStateWaitingForCleaning - pool.dbs[id] = testDB - - // add it to waitingForCleaning channel, to have it cleaned up by the worker - pool.waitingForCleaning <- id - - return nil - // dbHashPool unlocked - // ! -} - -// ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). -// If the test DB is in a different state than 'dirty', ErrInvalidState is returned. -func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { - // ! 
- // DBPool locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.Lock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - // no such pool - p.mutex.Unlock() - return ErrUnknownHash - } - p.mutex.Unlock() - - pool.Lock() - defer pool.Unlock() - - if id < 0 || id >= len(pool.dbs) { - return ErrInvalidIndex - } - - // check if db is in the correct state - testDB := pool.dbs[id] - if testDB.state == dbStateReady { - return nil - } - - // if not in use, it will be cleaned up by a worker - if testDB.state != dbStateDirty { - return ErrInvalidState - } - - testDB.state = dbStateReady - pool.dbs[id] = testDB - - pool.ready <- id - - return nil -} - -// RemoveAllWithHash removes a pool with a given template hash. -// All background workers belonging to this pool are stopped. -func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc func(db.TestDatabase) error) error { - - // ! - // DBPool locked - p.mutex.Lock() - defer p.mutex.Unlock() - - pool := p.pools[hash] - - if pool == nil { - // no such pool - return ErrUnknownHash - } - - if err := pool.removeAll(removeFunc); err != nil { - return err - } - - // all DBs have been removed, now remove the pool itself - delete(p.pools, hash) - - return nil - // DBPool unlocked - // ! -} - -// RemoveAll removes all tracked pools. -func (p *DBPool) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) error) error { - // ! - // DBPool locked - p.mutex.Lock() - defer p.mutex.Unlock() - - for hash, pool := range p.pools { - if err := pool.removeAll(removeFunc); err != nil { - return err - } - - delete(p.pools, hash) - } - - return nil - // DBPool unlocked - // ! -} - func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { return &dbHashPool{ dbs: make([]existingDB, 0, cfg.MaxPoolSize), @@ -657,16 +295,3 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error // dbHashPool unlocked // ! 
} - -// MakeDBName makes a test DB name with the configured prefix, template hash and ID of the DB. -func (p *DBPool) MakeDBName(hash string, id int) string { - p.mutex.RLock() - p.mutex.RUnlock() - - return makeDBName(p.PoolConfig.TestDBNamePrefix, hash, id) -} - -func makeDBName(testDBPrefix string, hash string, id int) string { - // db name has an ID in suffix - return fmt.Sprintf("%s%s_%03d", testDBPrefix, hash, id) -} diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go new file mode 100644 index 0000000..424940f --- /dev/null +++ b/pkg/pool/pool_collection.go @@ -0,0 +1,387 @@ +package pool + +import ( + "context" + "errors" + "fmt" + "runtime/trace" + "sync" + "time" + + "github.com/allaboutapps/integresql/pkg/db" +) + +type PoolConfig struct { + MaxPoolSize int + TestDBNamePrefix string + NumOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) + ForceDBReturn bool // Force returning test DB. If set to false, test databases that are 'dirty' can be recycled (in not actually used). +} + +type PoolCollection struct { + PoolConfig + + pools map[string]*dbHashPool // map[hash] + mutex sync.RWMutex +} + +// forceDBReturn set to false will allow reusing test databases that are marked as 'dirty'. +// Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. +func NewPoolCollection(cfg PoolConfig) *PoolCollection { + return &PoolCollection{ + pools: make(map[string]*dbHashPool), + PoolConfig: cfg, + } +} + +// RecreateDBFunc callback executed when a pool is extended or the DB cleaned up by a worker. 
+type RecreateDBFunc func(ctx context.Context, testDB db.TestDatabase, templateName string) error + +func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc RecreateDBFunc) recreateTestDBFunc { + return func(ctx context.Context, testDBWrapper *existingDB) error { + testDBWrapper.createdAt = time.Now() + return userRecreateFunc(ctx, testDBWrapper.TestDatabase, templateName) + } +} + +type recreateTestDBFunc func(context.Context, *existingDB) error + +// InitHashPool creates a new pool with a given template hash and starts the cleanup workers. +func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { + p.mutex.Lock() + defer p.mutex.Unlock() + + _ = p.initHashPool(ctx, templateDB, initDBFunc) +} + +func (p *PoolCollection) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { + // create a new dbHashPool + pool := newDBHashPool(p.PoolConfig, templateDB, initDBFunc) + // and start the cleaning worker + pool.enableWorker(p.NumOfWorkers) + + // pool is ready + p.pools[pool.templateDB.TemplateHash] = pool + + return pool +} + +// Stop is used to stop all background workers +func (p *PoolCollection) Stop() { + p.mutex.Lock() + defer p.mutex.Unlock() + + for _, pool := range p.pools { + close(pool.waitingForCleaning) + pool.wg.Wait() + } + +} + +// GetTestDatabase picks up a ready to use test DB. It waits the given timeout until a DB is available. +// If there is no DB ready and time elapses, ErrTimeout is returned. +// Otherwise, the obtained test DB is marked as 'dirty' and can be reused only if returned to the pool. +func (p *PoolCollection) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { + + // ! 
+ // PoolCollection locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") + p.mutex.RLock() + reg.End() + pool := p.pools[hash] + + if pool == nil { + // no such pool + p.mutex.RUnlock() + err = ErrUnknownHash + return + } + + p.mutex.RUnlock() + // PoolCollection unlocked + // ! + + var index int + select { + case <-time.After(timeout): + err = ErrTimeout + return + case index = <-pool.ready: + } + + // ! + // dbHashPool locked + reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") + pool.Lock() + defer pool.Unlock() + reg.End() + + // sanity check, should never happen + if index < 0 || index >= len(pool.dbs) { + err = ErrInvalidIndex + return + } + + testDB := pool.dbs[index] + // sanity check, should never happen - we got this index from 'ready' channel + if testDB.state != dbStateReady { + err = ErrInvalidState + return + } + + testDB.state = dbStateDirty + pool.dbs[index] = testDB + + select { + case pool.dirty <- index: + // sent to dirty without blocking + default: + // channel is full + } + + return testDB.TestDatabase, nil + // dbHashPool unlocked + // ! +} + +// AddTestDatabase adds a new test DB to the pool and creates it according to the template. +// The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. +// If the pool size has already reached MAX, ErrPoolFull is returned, unless ForceDBReturn flag is set to false. +// Then databases that were given away would get reset (if no DB connection is currently open) and marked as 'Ready'. +func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { + hash := templateDB.TemplateHash + + // ! + // PoolCollection locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") + p.mutex.Lock() + reg.End() + pool := p.pools[hash] + + if pool == nil { + pool = p.initHashPool(ctx, templateDB, initFunc) + } + + forceReturn := p.ForceDBReturn + p.mutex.Unlock() + // PoolCollection unlocked + // ! 
+ + newTestDB, err := pool.extend(ctx, dbStateReady, p.PoolConfig.TestDBNamePrefix) + if err != nil { + if errors.Is(err, ErrPoolFull) && !forceReturn { + // we can try to reset test databases that are 'dirty' + _, err := pool.resetNotReturned(ctx, p.TestDBNamePrefix, false /* shouldKeepDirty */) + return err + } + + return err + } + + // and add its index to 'ready' + pool.ready <- newTestDB.ID + + return nil +} + +// AddTestDatabase adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. +// The new test DB is marked as 'IsUse' and won't be picked up with GetTestDatabase, until it's returned to the pool. +func (p *PoolCollection) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { + hash := templateDB.TemplateHash + + // ! + // PoolCollection locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") + p.mutex.Lock() + reg.End() + pool := p.pools[hash] + + if pool == nil { + // meant to be only for already initialized pools + p.mutex.Unlock() + return db.TestDatabase{}, ErrUnknownHash + } + + forceReturn := p.ForceDBReturn + p.mutex.Unlock() + // PoolCollection unlocked + // ! + + // because we return it right away, we treat it as 'dirty' + testDB, err := pool.extend(ctx, dbStateDirty, p.PoolConfig.TestDBNamePrefix) + if err != nil { + if errors.Is(err, ErrPoolFull) && !forceReturn { + // we can try to reset test databases that are 'dirty' + return pool.resetNotReturned(ctx, p.TestDBNamePrefix, true /* shouldKeepDirty */) + } + + return db.TestDatabase{}, err + } + + select { + case pool.dirty <- testDB.ID: + // sent to dirty without blocking + default: + // channel is full + } + + return testDB, nil +} + +// RestoreTestDatabase recreates the given test DB and returns it back to the pool. +// To have it recreated, it is added to 'waitingForCleaning' channel. +// If the test DB is in a different state than 'dirty', ErrInvalidState is returned. 
+func (p *PoolCollection) RestoreTestDatabase(ctx context.Context, hash string, id int) error { + + // ! + // PoolCollection locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") + p.mutex.Lock() + reg.End() + pool := p.pools[hash] + + if pool == nil { + // no such pool + p.mutex.Unlock() + return ErrUnknownHash + } + + // ! + // dbHashPool locked + reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") + pool.Lock() + defer pool.Unlock() + reg.End() + + p.mutex.Unlock() + // PoolCollection unlocked + // ! + + if id < 0 || id >= len(pool.dbs) { + return ErrInvalidIndex + } + + // check if db is in the correct state + testDB := pool.dbs[id] + if testDB.state == dbStateReady { + return nil + } + + if testDB.state != dbStateDirty { + return ErrInvalidState + } + + testDB.state = dbStateWaitingForCleaning + pool.dbs[id] = testDB + + // add it to waitingForCleaning channel, to have it cleaned up by the worker + pool.waitingForCleaning <- id + + return nil + // dbHashPool unlocked + // ! +} + +// ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). +// If the test DB is in a different state than 'dirty', ErrInvalidState is returned. +func (p *PoolCollection) ReturnTestDatabase(ctx context.Context, hash string, id int) error { + // ! 
+ // PoolCollection locked + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") + p.mutex.Lock() + reg.End() + pool := p.pools[hash] + + if pool == nil { + // no such pool + p.mutex.Unlock() + return ErrUnknownHash + } + p.mutex.Unlock() + + pool.Lock() + defer pool.Unlock() + + if id < 0 || id >= len(pool.dbs) { + return ErrInvalidIndex + } + + // check if db is in the correct state + testDB := pool.dbs[id] + if testDB.state == dbStateReady { + return nil + } + + // if not in use, it will be cleaned up by a worker + if testDB.state != dbStateDirty { + return ErrInvalidState + } + + testDB.state = dbStateReady + pool.dbs[id] = testDB + + pool.ready <- id + + return nil +} + +// RemoveAllWithHash removes a pool with a given template hash. +// All background workers belonging to this pool are stopped. +func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, removeFunc func(db.TestDatabase) error) error { + + // ! + // PoolCollection locked + p.mutex.Lock() + defer p.mutex.Unlock() + + pool := p.pools[hash] + + if pool == nil { + // no such pool + return ErrUnknownHash + } + + if err := pool.removeAll(removeFunc); err != nil { + return err + } + + // all DBs have been removed, now remove the pool itself + delete(p.pools, hash) + + return nil + // PoolCollection unlocked + // ! +} + +// RemoveAll removes all tracked pools. +func (p *PoolCollection) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) error) error { + // ! + // PoolCollection locked + p.mutex.Lock() + defer p.mutex.Unlock() + + for hash, pool := range p.pools { + if err := pool.removeAll(removeFunc); err != nil { + return err + } + + delete(p.pools, hash) + } + + return nil + // PoolCollection unlocked + // ! +} + +// MakeDBName makes a test DB name with the configured prefix, template hash and ID of the DB. 
+func (p *PoolCollection) MakeDBName(hash string, id int) string { + p.mutex.RLock() + p.mutex.RUnlock() + + return makeDBName(p.PoolConfig.TestDBNamePrefix, hash, id) +} + +func makeDBName(testDBPrefix string, hash string, id int) string { + // db name has an ID in suffix + return fmt.Sprintf("%s%s_%03d", testDBPrefix, hash, id) +} diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_collection_test.go similarity index 97% rename from pkg/pool/pool_test.go rename to pkg/pool/pool_collection_test.go index a7f2dd6..c34ccd1 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_collection_test.go @@ -21,7 +21,7 @@ func TestPoolAddGet(t *testing.T) { TestDBNamePrefix: "prefix_", ForceDBReturn: true, } - p := pool.NewDBPool(cfg) + p := pool.NewPoolCollection(cfg) hash1 := "h1" hash2 := "h2" @@ -94,7 +94,7 @@ func TestPoolAddGetConcurrent(t *testing.T) { TestDBNamePrefix: "", ForceDBReturn: true, } - p := pool.NewDBPool(cfg) + p := pool.NewPoolCollection(cfg) var wg sync.WaitGroup sleepDuration := 100 * time.Millisecond @@ -165,7 +165,7 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { TestDBNamePrefix: "", ForceDBReturn: true, } - p := pool.NewDBPool(cfg) + p := pool.NewPoolCollection(cfg) var wg sync.WaitGroup @@ -223,7 +223,7 @@ func TestPoolRemoveAll(t *testing.T) { TestDBNamePrefix: "", ForceDBReturn: true, } - p := pool.NewDBPool(cfg) + p := pool.NewPoolCollection(cfg) // add DBs sequentially for i := 0; i < cfg.MaxPoolSize; i++ { @@ -269,7 +269,7 @@ func TestPoolInit(t *testing.T) { TestDBNamePrefix: "", ForceDBReturn: true, } - p := pool.NewDBPool(cfg) + p := pool.NewPoolCollection(cfg) // we will test 2 ways of adding new DBs for i := 0; i < cfg.MaxPoolSize/2; i++ { @@ -340,7 +340,7 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { TestDBNamePrefix: "test_", ForceDBReturn: false, } - p := pool.NewDBPool(cfg) + p := pool.NewPoolCollection(cfg) p.InitHashPool(ctx, templateDB1, initFunc) for i := 0; i < cfg.MaxPoolSize; i++ { @@ -404,7 +404,7 @@ func 
TestPoolReturnTestDatabase(t *testing.T) { NumOfWorkers: 3, ForceDBReturn: true, } - p := pool.NewDBPool(cfg) + p := pool.NewPoolCollection(cfg) p.InitHashPool(ctx, templateDB1, initFunc) for i := 0; i < cfg.MaxPoolSize; i++ { @@ -450,7 +450,7 @@ func TestPoolRestoreTestDatabase(t *testing.T) { NumOfWorkers: 3, ForceDBReturn: true, } - p := pool.NewDBPool(cfg) + p := pool.NewPoolCollection(cfg) p.InitHashPool(ctx, templateDB1, initFunc) for i := 0; i < cfg.MaxPoolSize; i++ { From 758e63eaa11cb1a094f2e0480b8b161146dbe212 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 06:04:56 +0000 Subject: [PATCH 113/160] rename /restore to /reset --- internal/api/templates/routes.go | 2 +- internal/api/templates/templates.go | 4 ++-- pkg/manager/manager.go | 8 ++++---- pkg/manager/manager_test.go | 8 ++++---- pkg/pool/pool_collection.go | 4 ++-- pkg/pool/pool_collection_test.go | 6 +++--- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/internal/api/templates/routes.go b/internal/api/templates/routes.go index 8ba9886..33a26c5 100644 --- a/internal/api/templates/routes.go +++ b/internal/api/templates/routes.go @@ -11,7 +11,7 @@ func InitRoutes(s *api.Server) { g.GET("/:hash/tests", getTestDatabase(s)) g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s)) // deprecated, use POST /unlock instead - g.POST("/:hash/tests/:id/restore", postRestoreTestDatabase(s)) + g.POST("/:hash/tests/:id/reset", postResetTestDatabase(s)) g.POST("/:hash/tests/:id/unlock", postUnlockTestDatabase(s)) } diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 1349242..b33ad74 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -125,7 +125,7 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return postUnlockTestDatabase(s) } -func postRestoreTestDatabase(s *api.Server) echo.HandlerFunc { +func postResetTestDatabase(s *api.Server) echo.HandlerFunc { return func(c echo.Context) 
error { hash := c.Param("hash") id, err := strconv.Atoi(c.Param("id")) @@ -133,7 +133,7 @@ func postRestoreTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID") } - if err := s.Manager.RestoreTestDatabase(c.Request().Context(), hash, id); err != nil { + if err := s.Manager.ResetTestDatabase(c.Request().Context(), hash, id); err != nil { switch err { case manager.ErrManagerNotReady: return echo.ErrServiceUnavailable diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 4ab5377..fcc1d10 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -393,9 +393,9 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e return nil } -// RestoreTestDatabase recreates the test DB according to the template and returns it back to the pool. -func (m *Manager) RestoreTestDatabase(ctx context.Context, hash string, id int) error { - ctx, task := trace.NewTask(ctx, "restore_test_db") +// ResetTestDatabase recreates the test DB according to the template and returns it back to the pool. 
+func (m *Manager) ResetTestDatabase(ctx context.Context, hash string, id int) error { + ctx, task := trace.NewTask(ctx, "reset_test_db") defer task.End() if !m.Ready() { @@ -415,7 +415,7 @@ func (m *Manager) RestoreTestDatabase(ctx context.Context, hash string, id int) } // template is ready, we can returb the testDB to the pool and have it cleaned up - if err := m.pool.RestoreTestDatabase(ctx, hash, id); err != nil { + if err := m.pool.ResetTestDatabase(ctx, hash, id); err != nil { if !(errors.Is(err, pool.ErrInvalidIndex) || errors.Is(err, pool.ErrUnknownHash)) { // other error is an internal error diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 21b7b6a..55d707f 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -788,7 +788,7 @@ func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { } } -func TestManagerReturnRestoreTestDatabase(t *testing.T) { +func TestManagerReturnResetTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() @@ -804,9 +804,9 @@ func TestManagerReturnRestoreTestDatabase(t *testing.T) { resultCheck func(row *sql.Row, id int) }{ { - name: "Restore", + name: "Reset", giveBackFunc: func(m *manager.Manager, ctx context.Context, hash string, id int) error { - return m.RestoreTestDatabase(ctx, hash, id) + return m.ResetTestDatabase(ctx, hash, id) }, resultCheck: func(row *sql.Row, id int) { assert.NoError(t, row.Err()) @@ -870,7 +870,7 @@ func TestManagerReturnRestoreTestDatabase(t *testing.T) { _, err = m.GetTestDatabase(ctx, hash) assert.ErrorIs(t, err, pool.ErrPoolFull) - // restore or return test database + // reset or return test database for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { assert.NoError(t, tt.giveBackFunc(m, ctx, hash, i), i) } diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 424940f..aea2413 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -228,10 +228,10 @@ 
func (p *PoolCollection) ExtendPool(ctx context.Context, templateDB db.Database) return testDB, nil } -// RestoreTestDatabase recreates the given test DB and returns it back to the pool. +// ResetTestDatabase recreates the given test DB and returns it back to the pool. // To have it recreated, it is added to 'waitingForCleaning' channel. // If the test DB is in a different state than 'dirty', ErrInvalidState is returned. -func (p *PoolCollection) RestoreTestDatabase(ctx context.Context, hash string, id int) error { +func (p *PoolCollection) ResetTestDatabase(ctx context.Context, hash string, id int) error { // ! // PoolCollection locked diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index c34ccd1..b5f3c49 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -423,7 +423,7 @@ func TestPoolReturnTestDatabase(t *testing.T) { p.Stop() } -func TestPoolRestoreTestDatabase(t *testing.T) { +func TestPoolResetTestDatabase(t *testing.T) { t.Parallel() ctx := context.Background() @@ -456,8 +456,8 @@ func TestPoolRestoreTestDatabase(t *testing.T) { for i := 0; i < cfg.MaxPoolSize; i++ { testDB, err := p.ExtendPool(ctx, templateDB1) assert.NoError(t, err) - // restore - add for cleaning - assert.NoError(t, p.RestoreTestDatabase(ctx, hash1, testDB.ID)) + // reset - add for cleaning + assert.NoError(t, p.ResetTestDatabase(ctx, hash1, testDB.ID)) } time.Sleep(100 * time.Millisecond) // wait a tiny bit to have all DB cleaned up From c34f7688b6cda2a8210e13eb9c98c4ed10ac0536 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 06:41:36 +0000 Subject: [PATCH 114/160] add enableDBReset param to templates --- internal/api/templates/templates.go | 5 ++-- pkg/manager/helpers_test.go | 7 +++-- pkg/manager/manager.go | 23 ++++++++++----- pkg/manager/manager_test.go | 36 +++++++++++------------ pkg/templates/template.go | 20 +++++++++++-- pkg/templates/template_collection.go | 6 ++-- 
pkg/templates/template_collection_test.go | 8 +++-- pkg/templates/template_test.go | 5 ++-- 8 files changed, 67 insertions(+), 43 deletions(-) diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index b33ad74..5025169 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -14,7 +14,8 @@ import ( func postInitializeTemplate(s *api.Server) echo.HandlerFunc { type requestPayload struct { - Hash string `json:"hash"` + Hash string `json:"hash"` + EnableDBReset bool `json:"enableReset"` } return func(c echo.Context) error { @@ -31,7 +32,7 @@ func postInitializeTemplate(s *api.Server) echo.HandlerFunc { ctx, cancel := context.WithTimeout(c.Request().Context(), 30*time.Second) defer cancel() - template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash) + template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash, payload.EnableDBReset) if err != nil { switch err { case manager.ErrManagerNotReady: diff --git a/pkg/manager/helpers_test.go b/pkg/manager/helpers_test.go index 27c5048..fa317f7 100644 --- a/pkg/manager/helpers_test.go +++ b/pkg/manager/helpers_test.go @@ -51,9 +51,12 @@ func disconnectManager(t *testing.T, m *manager.Manager) { } -func initTemplateDB(ctx context.Context, errs chan<- error, m *manager.Manager) { +func initTemplateDB(ctx context.Context, errs chan<- error, m *manager.Manager, enableDBReset ...bool) { - template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash") + // true by default + enableDBResetFlag := !(len(enableDBReset) > 0 && !enableDBReset[0]) + + template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash", enableDBResetFlag) if err != nil { errs <- err return diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index fcc1d10..0ed2150 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -178,7 +178,7 @@ func (m *Manager) Initialize(ctx context.Context) error { return nil } 
-func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { +func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, enableDBReset bool) (db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "initialize_template_db") defer task.End() @@ -187,12 +187,15 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( } dbName := m.makeTemplateDatabaseName(hash) - templateConfig := db.DatabaseConfig{ - Host: m.config.ManagerDatabaseConfig.Host, - Port: m.config.ManagerDatabaseConfig.Port, - Username: m.config.ManagerDatabaseConfig.Username, - Password: m.config.ManagerDatabaseConfig.Password, - Database: dbName, + templateConfig := templates.TemplateConfig{ + DatabaseConfig: db.DatabaseConfig{ + Host: m.config.ManagerDatabaseConfig.Host, + Port: m.config.ManagerDatabaseConfig.Port, + Username: m.config.ManagerDatabaseConfig.Username, + Password: m.config.ManagerDatabaseConfig.Password, + Database: dbName, + }, + ResetEnabled: enableDBReset, } added, unlock := m.templates.Push(ctx, hash, templateConfig) @@ -214,7 +217,7 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( return db.TemplateDatabase{ Database: db.Database{ TemplateHash: hash, - Config: templateConfig, + Config: templateConfig.DatabaseConfig, }, }, nil } @@ -408,6 +411,10 @@ func (m *Manager) ResetTestDatabase(ctx context.Context, hash string, id int) er return m.dropDatabaseWithID(ctx, hash, id) } + if !template.IsResetEnabled(ctx) { + return nil + } + if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != templates.TemplateStateFinalized { diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 55d707f..b26635a 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -106,7 +106,7 @@ func TestManagerInitializeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := 
m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -128,7 +128,7 @@ func TestManagerInitializeTemplateDatabaseTimeout(t *testing.T) { ctxt, cancel := context.WithTimeout(ctx, 10*time.Nanosecond) defer cancel() - _, err := m.InitializeTemplateDatabase(ctxt, hash) + _, err := m.InitializeTemplateDatabase(ctxt, hash, true /* enableDBReset */) if err != context.DeadlineExceeded { t.Fatalf("received unexpected error, got %v, want %v", err, context.DeadlineExceeded) } @@ -204,7 +204,7 @@ func TestManagerFinalizeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -286,7 +286,7 @@ func TestManagerGetTestDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -322,7 +322,7 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -354,7 +354,7 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -416,7 +416,7 @@ func TestManagerGetTestDatabaseConcurrently(t 
*testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -482,7 +482,7 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -550,7 +550,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -606,7 +606,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { t.Fatalf("finalize template should not work: %v", err) } - _, err = m.InitializeTemplateDatabase(ctx, hash) + _, err = m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("reinitialize after discard template database should work: %v", err) } @@ -635,7 +635,7 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -681,7 +681,7 @@ func TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -735,7 +735,7 @@ func 
TestManagerGetTestDatabaseDontReturn(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -842,7 +842,7 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -905,7 +905,7 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -953,7 +953,7 @@ func TestManagerReturnUnknownTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -983,7 +983,7 @@ func TestManagerMultiFinalize(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1042,7 +1042,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } diff --git a/pkg/templates/template.go b/pkg/templates/template.go index 277bfb2..bf8c97d 100644 --- 
a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -18,6 +18,7 @@ const ( ) type Template struct { + TemplateConfig db.Database state TemplateState @@ -25,16 +26,29 @@ type Template struct { mutex sync.RWMutex } -func NewTemplate(database db.Database) *Template { +type TemplateConfig struct { + db.DatabaseConfig + ResetEnabled bool +} + +func NewTemplate(hash string, config TemplateConfig) *Template { t := &Template{ - Database: database, - state: TemplateStateInit, + TemplateConfig: config, + Database: db.Database{TemplateHash: hash, Config: config.DatabaseConfig}, + state: TemplateStateInit, } t.cond = sync.NewCond(&t.mutex) return t } +func (t *Template) IsResetEnabled(ctx context.Context) bool { + t.mutex.RLock() + defer t.mutex.RUnlock() + + return t.ResetEnabled +} + // GetState locks the template and checks its state. func (t *Template) GetState(ctx context.Context) TemplateState { t.mutex.RLock() diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index acf7a28..7b41af5 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -4,8 +4,6 @@ import ( "context" "runtime/trace" "sync" - - "github.com/allaboutapps/integresql/pkg/db" ) type Collection struct { @@ -27,7 +25,7 @@ func NewCollection() *Collection { // Returns added=false, if the template has been there already. // In such case, it is not overwritten! To replace a template, first remove it (via Pop) and then Push again. // This function locks the collection and no matter what is its output, the unlock function needs to be called to release the lock. 
-func (tc *Collection) Push(ctx context.Context, hash string, template db.DatabaseConfig) (added bool, unlock Unlock) { +func (tc *Collection) Push(ctx context.Context, hash string, config TemplateConfig) (added bool, unlock Unlock) { reg := trace.StartRegion(ctx, "get_template_lock") tc.collMutex.Lock() @@ -41,7 +39,7 @@ func (tc *Collection) Push(ctx context.Context, hash string, template db.Databas return false, unlock } - tc.templates[hash] = NewTemplate(db.Database{TemplateHash: hash, Config: template}) + tc.templates[hash] = NewTemplate(hash, config) return true, unlock } diff --git a/pkg/templates/template_collection_test.go b/pkg/templates/template_collection_test.go index 5fbec40..2cd41f1 100644 --- a/pkg/templates/template_collection_test.go +++ b/pkg/templates/template_collection_test.go @@ -15,9 +15,11 @@ func TestTemplateCollection(t *testing.T) { ctx := context.Background() coll := templates.NewCollection() - cfg := db.DatabaseConfig{ - Username: "ich", - Database: "template_test", + cfg := templates.TemplateConfig{ + DatabaseConfig: db.DatabaseConfig{ + Username: "ich", + Database: "template_test", + }, } hash := "123" diff --git a/pkg/templates/template_test.go b/pkg/templates/template_test.go index 93a3158..734c328 100644 --- a/pkg/templates/template_test.go +++ b/pkg/templates/template_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/templates" "github.com/stretchr/testify/assert" ) @@ -15,7 +14,7 @@ import ( func TestTemplateGetSetState(t *testing.T) { ctx := context.Background() - t1 := templates.NewTemplate(db.Database{TemplateHash: "123"}) + t1 := templates.NewTemplate("123", templates.TemplateConfig{}) state := t1.GetState(ctx) assert.Equal(t, templates.TemplateStateInit, state) @@ -33,7 +32,7 @@ func TestTemplateWaitForReady(t *testing.T) { goroutineNum := 10 // initalize a new template, not ready yet - t1 := 
templates.NewTemplate(db.Database{TemplateHash: "123"}) + t1 := templates.NewTemplate("123", templates.TemplateConfig{}) state := t1.GetState(ctx) assert.Equal(t, templates.TemplateStateInit, state) From f13f2ed8d6a04443183c0acf9be916165ed00f16 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 07:59:20 +0000 Subject: [PATCH 115/160] allow template config update --- pkg/templates/template.go | 12 ++++++++ pkg/templates/template_collection.go | 13 ++++++--- pkg/templates/template_collection_test.go | 35 +++++++++++++++++++++++ 3 files changed, 56 insertions(+), 4 deletions(-) diff --git a/pkg/templates/template.go b/pkg/templates/template.go index bf8c97d..3dbd328 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -49,6 +49,13 @@ func (t *Template) IsResetEnabled(ctx context.Context) bool { return t.ResetEnabled } +func (t *Template) GetConfig(ctx context.Context) TemplateConfig { + t.mutex.RLock() + defer t.mutex.RUnlock() + + return t.TemplateConfig +} + // GetState locks the template and checks its state. func (t *Template) GetState(ctx context.Context) TemplateState { t.mutex.RLock() @@ -122,3 +129,8 @@ func (l lockedTemplate) SetState(ctx context.Context, newState TemplateState) { l.t.state = newState l.t.cond.Broadcast() } + +func (c TemplateConfig) Equals(other TemplateConfig) bool { + return c.ResetEnabled == other.ResetEnabled && + c.DatabaseConfig.Database == other.DatabaseConfig.Database +} diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index 7b41af5..1d4f31f 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -22,8 +22,8 @@ func NewCollection() *Collection { } // Push tries to add a new template to the collection. -// Returns added=false, if the template has been there already. -// In such case, it is not overwritten! To replace a template, first remove it (via Pop) and then Push again. 
+// If the template already exists and the config matches, added=false is returned. +// If config doesn't match, the template is overwritten and added=true is returned. // This function locks the collection and no matter what is its output, the unlock function needs to be called to release the lock. func (tc *Collection) Push(ctx context.Context, hash string, config TemplateConfig) (added bool, unlock Unlock) { reg := trace.StartRegion(ctx, "get_template_lock") @@ -34,9 +34,14 @@ func (tc *Collection) Push(ctx context.Context, hash string, config TemplateConf reg.End() } - _, ok := tc.templates[hash] + template, ok := tc.templates[hash] if ok { - return false, unlock + // check if settings match + + if template.GetConfig(ctx).Equals(config) { + return false, unlock + } + // else overwrite the template } tc.templates[hash] = NewTemplate(hash, config) diff --git a/pkg/templates/template_collection_test.go b/pkg/templates/template_collection_test.go index 2cd41f1..99a8b94 100644 --- a/pkg/templates/template_collection_test.go +++ b/pkg/templates/template_collection_test.go @@ -60,3 +60,38 @@ func TestTemplateCollection(t *testing.T) { assert.True(t, found) assert.Equal(t, "ich", template3.Config.Username) } + +func TestTemplateCollectionPushWithOtherConfig(t *testing.T) { + ctx := context.Background() + + coll := templates.NewCollection() + cfg := templates.TemplateConfig{ + DatabaseConfig: db.DatabaseConfig{ + Username: "ich", + Database: "template_test", + }, + ResetEnabled: true, + } + hash := "123" + + added, unlock := coll.Push(ctx, hash, cfg) + assert.True(t, added) + unlock() + + added, unlock = coll.Push(ctx, hash, cfg) + assert.False(t, added) + unlock() + + cfg.ResetEnabled = false + cfg.Database = "template_another" + added, unlock = coll.Push(ctx, hash, cfg) + assert.True(t, added) + unlock() + + // try to get again when the template is locked + template, found := coll.Get(ctx, hash) + assert.True(t, found) + assert.False(t, template.ResetEnabled) + 
assert.Equal(t, "template_another", template.Config.Database) + +} From 620bf6afd0df8e186f97fce5968f083c09094e1b Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 08:09:49 +0000 Subject: [PATCH 116/160] require pool init before AddTestDatabase --- pkg/manager/manager.go | 2 +- pkg/pool/pool_collection.go | 11 +++------- pkg/pool/pool_collection_test.go | 35 ++++++++++++++++++++------------ 3 files changed, 26 insertions(+), 22 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 0ed2150..b206027 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -549,7 +549,7 @@ func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template * return ErrInvalidTemplateState } - return m.pool.AddTestDatabase(ctx, template.Database, m.recreateTestDB) + return m.pool.AddTestDatabase(ctx, template.Database) } // Adds new test databases for a template, intended to be run asynchronously from other operations in a separate goroutine, using the manager's WaitGroup to synchronize for shutdown. 
diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index aea2413..9c89bea 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -51,10 +51,6 @@ func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Databas p.mutex.Lock() defer p.mutex.Unlock() - _ = p.initHashPool(ctx, templateDB, initDBFunc) -} - -func (p *PoolCollection) initHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { // create a new dbHashPool pool := newDBHashPool(p.PoolConfig, templateDB, initDBFunc) // and start the cleaning worker @@ -62,8 +58,6 @@ func (p *PoolCollection) initHashPool(ctx context.Context, templateDB db.Databas // pool is ready p.pools[pool.templateDB.TemplateHash] = pool - - return pool } // Stop is used to stop all background workers @@ -148,7 +142,7 @@ func (p *PoolCollection) GetTestDatabase(ctx context.Context, hash string, timeo // The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. // If the pool size has already reached MAX, ErrPoolFull is returned, unless ForceDBReturn flag is set to false. // Then databases that were given away would get reset (if no DB connection is currently open) and marked as 'Ready'. -func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { +func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Database) error { hash := templateDB.TemplateHash // ! 
@@ -159,7 +153,8 @@ func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Data pool := p.pools[hash] if pool == nil { - pool = p.initHashPool(ctx, templateDB, initFunc) + p.mutex.Unlock() + return ErrUnknownHash } forceReturn := p.ForceDBReturn diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index b5f3c49..66ff02b 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -36,13 +36,14 @@ func TestPoolAddGet(t *testing.T) { t.Log("(re)create ", testDB.Database) return nil } + p.InitHashPool(ctx, templateDB, initFunc) // get from empty _, err := p.GetTestDatabase(ctx, hash1, 0) assert.Error(t, err, pool.ErrTimeout) // add a new one - assert.NoError(t, p.AddTestDatabase(ctx, templateDB, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB)) // get it testDB, err := p.GetTestDatabase(ctx, hash1, 0) assert.NoError(t, err) @@ -50,10 +51,12 @@ func TestPoolAddGet(t *testing.T) { assert.Equal(t, "ich", testDB.Database.Config.Username) // add for h2 - templateDB.TemplateHash = hash2 - assert.NoError(t, p.AddTestDatabase(ctx, templateDB, initFunc)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB, initFunc)) - assert.ErrorIs(t, p.AddTestDatabase(ctx, templateDB, initFunc), pool.ErrPoolFull) + templateDB2 := templateDB + templateDB2.TemplateHash = hash2 + p.InitHashPool(ctx, templateDB2, initFunc) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) + assert.ErrorIs(t, p.AddTestDatabase(ctx, templateDB2), pool.ErrPoolFull) // get from empty h1 _, err = p.GetTestDatabase(ctx, hash1, 0) @@ -114,8 +117,8 @@ func TestPoolAddGetConcurrent(t *testing.T) { // add DBs sequentially for i := 0; i < cfg.MaxPoolSize; i++ { - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) + 
assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) time.Sleep(sleepDuration) } }() @@ -166,13 +169,15 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { ForceDBReturn: true, } p := pool.NewPoolCollection(cfg) + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) var wg sync.WaitGroup // add DBs sequentially for i := 0; i < cfg.MaxPoolSize/2; i++ { - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) } // try to get them from another goroutines in parallel @@ -224,11 +229,13 @@ func TestPoolRemoveAll(t *testing.T) { ForceDBReturn: true, } p := pool.NewPoolCollection(cfg) + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) // add DBs sequentially for i := 0; i < cfg.MaxPoolSize; i++ { - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2, initFunc)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) } // remove all @@ -241,7 +248,8 @@ func TestPoolRemoveAll(t *testing.T) { assert.Error(t, err, pool.ErrTimeout) // start using pool again - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) + p.InitHashPool(ctx, templateDB1, initFunc) + assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) testDB, err := p.GetTestDatabase(ctx, hash1, 0) assert.NoError(t, err) assert.Equal(t, 0, testDB.ID) @@ -270,11 +278,12 @@ func TestPoolInit(t *testing.T) { ForceDBReturn: true, } p := pool.NewPoolCollection(cfg) + p.InitHashPool(ctx, templateDB1, initFunc) // we will test 2 ways of adding new DBs for i := 0; i < cfg.MaxPoolSize/2; i++ { // add and get freshly added DB - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1, initFunc)) + assert.NoError(t, 
p.AddTestDatabase(ctx, templateDB1)) _, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) assert.NoError(t, err) From 2c6d16f09bb673d2f2559b0923f6bdbe54163c0d Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 08:17:20 +0000 Subject: [PATCH 117/160] remove all from pool if template config changed --- pkg/manager/manager.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index b206027..fd31d6c 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -214,6 +214,14 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, e } reg.End() + // if template config has been overwritten, the existing pool needs to be removed + err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB) + if err != nil && !errors.Is(err, pool.ErrUnknownHash){ + m.templates.RemoveUnsafe(ctx, hash) + + return db.TemplateDatabase{}, err + } + return db.TemplateDatabase{ Database: db.Database{ TemplateHash: hash, @@ -510,10 +518,14 @@ func (m *Manager) createDatabase(ctx context.Context, dbName string, owner strin return nil } -func (m *Manager) recreateTestDB(ctx context.Context, testDB db.TestDatabase, templateName string) error { +func (m *Manager) recreateTestPoolDB(ctx context.Context, testDB db.TestDatabase, templateName string) error { return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) } +func (m *Manager) dropTestPoolDB(ctx context.Context, testDB db.TestDatabase) error { + return return m.dropDatabase(ctx, testDB.Config.Database) +} + func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { defer trace.StartRegion(ctx, "drop_db").End() From 87e06421bd597afc78408041b11f59e1db35585f Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 09:36:51 +0000 Subject: [PATCH 118/160] add ctx to RemoveFunc definition --- pkg/manager/manager.go | 26 
+++++++++----------------- pkg/pool/pool.go | 4 ++-- pkg/pool/pool_collection.go | 11 +++++++---- pkg/pool/pool_collection_test.go | 2 +- 4 files changed, 19 insertions(+), 24 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index fd31d6c..45a48c2 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -216,7 +216,7 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, e // if template config has been overwritten, the existing pool needs to be removed err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB) - if err != nil && !errors.Is(err, pool.ErrUnknownHash){ + if err != nil && !errors.Is(err, pool.ErrUnknownHash) { m.templates.RemoveUnsafe(ctx, hash) return db.TemplateDatabase{}, err @@ -242,9 +242,7 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro m.wg.Wait() // first remove all DB with this hash - if err := m.pool.RemoveAllWithHash(ctx, hash, func(testDB db.TestDatabase) error { - return m.dropDatabase(ctx, testDB.Database.Config.Database) - }); err != nil && !errors.Is(err, pool.ErrUnknownHash) { + if err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB); err != nil && !errors.Is(err, pool.ErrUnknownHash) { return err } @@ -296,7 +294,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db } // Init a pool with this hash - m.pool.InitHashPool(ctx, template.Database, m.recreateTestDB) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB) lockedTemplate.SetState(ctx, templates.TemplateStateFinalized) m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) @@ -340,7 +338,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData // Template exists, but the pool is not there - // it must have been removed. // It needs to be reinitialized. 
- m.pool.InitHashPool(ctx, template.Database, m.recreateTestDB) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB) // pool initalized, create one test db testDB, err = m.pool.ExtendPool(ctx, template.Database) @@ -419,6 +417,7 @@ func (m *Manager) ResetTestDatabase(ctx context.Context, hash string, id int) er return m.dropDatabaseWithID(ctx, hash, id) } + // don't allow to reset if it's not enabled for this template if !template.IsResetEnabled(ctx) { return nil } @@ -450,11 +449,7 @@ func (m *Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) er return ErrManagerNotReady } - removeFunc := func(testDB db.TestDatabase) error { - return m.dropDatabase(ctx, testDB.Config.Database) - } - - err := m.pool.RemoveAllWithHash(ctx, hash, removeFunc) + err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB) if errors.Is(err, pool.ErrUnknownHash) { return ErrTemplateNotFound } @@ -470,10 +465,7 @@ func (m *Manager) ResetAllTracking(ctx context.Context) error { // remove all templates to disallow any new test DB creation from existing templates m.templates.RemoveAll(ctx) - removeFunc := func(testDB db.TestDatabase) error { - return m.dropDatabase(ctx, testDB.Config.Database) - } - if err := m.pool.RemoveAll(ctx, removeFunc); err != nil { + if err := m.pool.RemoveAll(ctx, m.dropTestPoolDB); err != nil { return err } @@ -523,7 +515,7 @@ func (m *Manager) recreateTestPoolDB(ctx context.Context, testDB db.TestDatabase } func (m *Manager) dropTestPoolDB(ctx context.Context, testDB db.TestDatabase) error { - return return m.dropDatabase(ctx, testDB.Config.Database) + return m.dropDatabase(ctx, testDB.Config.Database) } func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { @@ -576,7 +568,7 @@ func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Templa for i := 0; i < count; i++ { if err := m.createTestDatabaseFromTemplate(ctx, template); err != nil { // TODO anna: error handling - // 
fmt.Printf("integresql: failed to initialize DB: %v\n", err) + fmt.Printf("integresql: failed to initialize DB from template: %v\n", err) } } }() diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 701f8a8..36a7be5 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -256,7 +256,7 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix strin // ! } -func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error { +func (pool *dbHashPool) removeAll(ctx context.Context, removeFunc RemoveDBFunc) error { // stop the worker // we don't close here because if the remove operation fails, we want to be able to repeat it @@ -278,7 +278,7 @@ func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error for id := len(pool.dbs) - 1; id >= 0; id-- { testDB := pool.dbs[id].TestDatabase - if err := removeFunc(testDB); err != nil { + if err := removeFunc(ctx, testDB); err != nil { return err } diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 9c89bea..e7f3294 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -37,6 +37,9 @@ func NewPoolCollection(cfg PoolConfig) *PoolCollection { // RecreateDBFunc callback executed when a pool is extended or the DB cleaned up by a worker. type RecreateDBFunc func(ctx context.Context, testDB db.TestDatabase, templateName string) error +// RemoveDBFunc callback executed to remove a database +type RemoveDBFunc func(ctx context.Context, testDB db.TestDatabase) error + func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc RecreateDBFunc) recreateTestDBFunc { return func(ctx context.Context, testDBWrapper *existingDB) error { testDBWrapper.createdAt = time.Now() @@ -322,7 +325,7 @@ func (p *PoolCollection) ReturnTestDatabase(ctx context.Context, hash string, id // RemoveAllWithHash removes a pool with a given template hash. // All background workers belonging to this pool are stopped. 
-func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, removeFunc func(db.TestDatabase) error) error { +func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, removeFunc RemoveDBFunc) error { // ! // PoolCollection locked @@ -336,7 +339,7 @@ func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, rem return ErrUnknownHash } - if err := pool.removeAll(removeFunc); err != nil { + if err := pool.removeAll(ctx, removeFunc); err != nil { return err } @@ -349,14 +352,14 @@ func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, rem } // RemoveAll removes all tracked pools. -func (p *PoolCollection) RemoveAll(ctx context.Context, removeFunc func(db.TestDatabase) error) error { +func (p *PoolCollection) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { // ! // PoolCollection locked p.mutex.Lock() defer p.mutex.Unlock() for hash, pool := range p.pools { - if err := pool.removeAll(removeFunc); err != nil { + if err := pool.removeAll(ctx, removeFunc); err != nil { return err } diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index 66ff02b..01bb2f2 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -217,7 +217,7 @@ func TestPoolRemoveAll(t *testing.T) { t.Log("(re)create ", testDB.Database) return nil } - removeFunc := func(testDB db.TestDatabase) error { + removeFunc := func(ctx context.Context, testDB db.TestDatabase) error { t.Log("remove ", testDB.Database) return nil } From 7f172d470f0ce8b70a6b95ffcb4e68f7d4b2cebd Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 10:56:57 +0000 Subject: [PATCH 119/160] refactor pool, rename forceReturn to enableDBReset --- pkg/manager/manager.go | 8 +- pkg/manager/manager_config.go | 4 +- pkg/manager/manager_test.go | 30 ++-- pkg/pool/pool.go | 170 ++++++++++++++++-- pkg/pool/pool_collection.go | 292 +++++++------------------------ 
pkg/pool/pool_collection_test.go | 42 ++--- 6 files changed, 264 insertions(+), 282 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 45a48c2..9579d6e 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -58,7 +58,7 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { MaxPoolSize: config.TestDatabaseMaxPoolSize, TestDBNamePrefix: testDBPrefix, NumOfWorkers: config.NumOfCleaningWorkers, - ForceDBReturn: config.TestDatabaseForceReturn, + EnableDBReset: config.TestDatabaseEnableReset, }, ), connectionCtx: context.TODO(), @@ -294,7 +294,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db } // Init a pool with this hash - m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.ResetEnabled) lockedTemplate.SetState(ctx, templates.TemplateStateFinalized) m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) @@ -338,7 +338,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData // Template exists, but the pool is not there - // it must have been removed. // It needs to be reinitialized. 
- m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.IsResetEnabled(ctx)) // pool initalized, create one test db testDB, err = m.pool.ExtendPool(ctx, template.Database) @@ -351,7 +351,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return db.TestDatabase{}, err } - if !m.config.TestDatabaseForceReturn { + if !m.config.TestDatabaseEnableReset { // before returning create a new test database in background m.wg.Add(1) go func(ctx context.Context, templ *templates.Template) { diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 8fac160..a218a48 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -21,7 +21,7 @@ type ManagerConfig struct { TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool NumOfCleaningWorkers int // Number of pool workers cleaning up dirty DBs - TestDatabaseForceReturn bool // Force returning used test DBs. If set to true, error "pool full" can be returned when extending is requested and max pool size is reached. Otherwise old test DBs will be reused. + TestDatabaseEnableReset bool // Enables resetting test databases with the cleanup workers. If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. 
} func DefaultManagerConfigFromEnv() ManagerConfig { @@ -61,6 +61,6 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 20000)), TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", 3), - TestDatabaseForceReturn: util.GetEnvAsBool("INTEGRESQL_TEST_DB_FORCE_RETURN", false), + TestDatabaseEnableReset: util.GetEnvAsBool("INTEGRESQL_TEST_DB_ENABLE_RESET", false), } } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index b26635a..6d85592 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -322,7 +322,7 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -354,7 +354,7 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -482,7 +482,7 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -550,7 +550,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, 
err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -606,7 +606,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { t.Fatalf("finalize template should not work: %v", err) } - _, err = m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + _, err = m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { t.Fatalf("reinitialize after discard template database should work: %v", err) } @@ -623,7 +623,7 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 3 cfg.TestDatabaseMaxPoolSize = 3 - cfg.TestDatabaseForceReturn = true + cfg.TestDatabaseEnableReset = true cfg.TestDatabaseGetTimeout = 200 * time.Millisecond m, _ := testManagerWithConfig(cfg) @@ -635,7 +635,7 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -670,7 +670,7 @@ func TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { cfg.TestDatabaseMaxPoolSize = 10 cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond // force DB return - cfg.TestDatabaseForceReturn = true + cfg.TestDatabaseEnableReset = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -681,7 +681,7 @@ func TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { 
t.Fatalf("failed to initialize template database: %v", err) } @@ -724,7 +724,7 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { cfg.TestDatabaseInitialPoolSize = 5 cfg.TestDatabaseMaxPoolSize = 5 // enable reusing old not returned databases - cfg.TestDatabaseForceReturn = false + cfg.TestDatabaseEnableReset = false m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -735,7 +735,7 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset /*enableDBReset */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -795,7 +795,7 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { cfg.TestDatabaseInitialPoolSize = 10 cfg.NumOfCleaningWorkers = 2 cfg.TestDatabaseMaxPoolSize = 10 - cfg.TestDatabaseForceReturn = true + cfg.TestDatabaseEnableReset = true cfg.TestDatabaseGetTimeout = 200 * time.Millisecond tests := []struct { @@ -842,7 +842,7 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1031,7 +1031,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() // there are no db added in background cfg.TestDatabaseInitialPoolSize = 0 - cfg.TestDatabaseForceReturn = true + cfg.TestDatabaseEnableReset = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -1042,7 +1042,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := 
m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 36a7be5..0d9834f 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -12,7 +12,6 @@ import ( ) var ( - ErrUnknownHash = errors.New("no database pool exists for this hash") ErrPoolFull = errors.New("database pool is full") ErrInvalidState = errors.New("database state is not valid for this operation") ErrInvalidIndex = errors.New("invalid database index (id)") @@ -44,15 +43,17 @@ type dbHashPool struct { recreateDB recreateTestDBFunc templateDB db.Database - - numOfWorkers int - forceDBReturn bool + PoolConfig sync.RWMutex wg sync.WaitGroup } -func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *dbHashPool { +// newDBHashPool creates new hash pool with the given config. enableDBReset overwrites the config given in PoolConfig parameter. +func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc, enableDBReset bool) *dbHashPool { + + cfg.EnableDBReset = enableDBReset + return &dbHashPool{ dbs: make([]existingDB, 0, cfg.MaxPoolSize), ready: make(chan int, cfg.MaxPoolSize), @@ -60,9 +61,154 @@ func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDB dirty: make(chan int, 3*cfg.MaxPoolSize), // here indexes can be duplicated recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), templateDB: templateDB, - numOfWorkers: cfg.NumOfWorkers, - forceDBReturn: cfg.ForceDBReturn, + PoolConfig: cfg, + } +} + +func (pool *dbHashPool) Stop() { + close(pool.waitingForCleaning) + pool.wg.Wait() +} + +func (pool *dbHashPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { + var index int + select { + case <-time.After(timeout): + err = ErrTimeout + return + case index = <-pool.ready: + } + + reg := 
trace.StartRegion(ctx, "wait_for_lock_hash_pool") + pool.Lock() + defer pool.Unlock() + reg.End() + + // sanity check, should never happen + if index < 0 || index >= len(pool.dbs) { + err = ErrInvalidIndex + return + } + + testDB := pool.dbs[index] + // sanity check, should never happen - we got this index from 'ready' channel + if testDB.state != dbStateReady { + err = ErrInvalidState + return + } + + testDB.state = dbStateDirty + pool.dbs[index] = testDB + + select { + case pool.dirty <- index: + // sent to dirty without blocking + default: + // channel is full + } + + return testDB.TestDatabase, nil + +} + +func (pool *dbHashPool) AddTestDatabase(ctx context.Context, templateDB db.Database) error { + + newTestDB, err := pool.extend(ctx, dbStateReady) + if err != nil { + if errors.Is(err, ErrPoolFull) && !pool.EnableDBReset { + // we can try to reset test databases that are 'dirty' + _, err := pool.resetNotReturned(ctx, false /* shouldKeepDirty */) + return err + } + + return err } + + // and add its index to 'ready' + pool.ready <- newTestDB.ID + + return nil +} + +func (pool *dbHashPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { + + // because we return it right away, we treat it as 'dirty' + testDB, err := pool.extend(ctx, dbStateDirty) + if err != nil { + if errors.Is(err, ErrPoolFull) && !pool.EnableDBReset { + // we can try to reset test databases that are 'dirty' + return pool.resetNotReturned(ctx, true /* shouldKeepDirty */) + } + + return db.TestDatabase{}, err + } + + select { + case pool.dirty <- testDB.ID: + // sent to dirty without blocking + default: + // channel is full + } + + return testDB, nil +} + +func (pool *dbHashPool) ResetTestDatabase(ctx context.Context, hash string, id int) error { + reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") + pool.Lock() + defer pool.Unlock() + reg.End() + + if id < 0 || id >= len(pool.dbs) { + return ErrInvalidIndex + } + + // check if db is in the correct state + 
testDB := pool.dbs[id] + if testDB.state == dbStateReady { + return nil + } + + if testDB.state != dbStateDirty { + return ErrInvalidState + } + + testDB.state = dbStateWaitingForCleaning + pool.dbs[id] = testDB + + // add it to waitingForCleaning channel, to have it cleaned up by the worker + pool.waitingForCleaning <- id + + return nil + +} + +func (pool *dbHashPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { + pool.Lock() + defer pool.Unlock() + + if id < 0 || id >= len(pool.dbs) { + return ErrInvalidIndex + } + + // check if db is in the correct state + testDB := pool.dbs[id] + if testDB.state == dbStateReady { + return nil + } + + // if not in use, it will be cleaned up by a worker + if testDB.state != dbStateDirty { + return ErrInvalidState + } + + testDB.state = dbStateReady + pool.dbs[id] = testDB + + pool.ready <- id + + return nil + } func (pool *dbHashPool) enableWorker(numberOfWorkers int) { @@ -129,7 +275,7 @@ func (pool *dbHashPool) workerCleanUpReturnedDB() { } } -func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix string) (db.TestDatabase, error) { +func (pool *dbHashPool) extend(ctx context.Context, state dbState) (db.TestDatabase, error) { // ! // dbHashPool locked reg := trace.StartRegion(ctx, "extend_wait_for_lock_hash_pool") @@ -156,7 +302,7 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix }, } // set DB name - newTestDB.Database.Config.Database = makeDBName(testDBPrefix, pool.templateDB.TemplateHash, index) + newTestDB.Database.Config.Database = makeDBName(pool.TestDBNamePrefix, pool.templateDB.TemplateHash, index) if err := pool.recreateDB(ctx, &newTestDB); err != nil { return db.TestDatabase{}, err @@ -173,7 +319,7 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // resetNotReturned recreates one DB that is 'dirty' and to which no db clients are connected (so it can be dropped). 
// If shouldKeepDirty is set to true, the DB state remains 'dirty'. Otherwise, it is marked as 'Ready' // and can be obtained again with GetTestDatabase request - in such case error is nil but returned db.TestDatabase is empty. -func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix string, shouldKeepDirty bool) (db.TestDatabase, error) { +func (pool *dbHashPool) resetNotReturned(ctx context.Context, shouldKeepDirty bool) (db.TestDatabase, error) { var testDB existingDB var index int found := false @@ -256,11 +402,11 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix strin // ! } -func (pool *dbHashPool) removeAll(ctx context.Context, removeFunc RemoveDBFunc) error { +func (pool *dbHashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { // stop the worker // we don't close here because if the remove operation fails, we want to be able to repeat it - for i := 0; i < pool.numOfWorkers; i++ { + for i := 0; i < pool.NumOfWorkers; i++ { pool.waitingForCleaning <- stopWorkerMessage } pool.wg.Wait() diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index e7f3294..de54108 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -11,11 +11,13 @@ import ( "github.com/allaboutapps/integresql/pkg/db" ) +var ErrUnknownHash = errors.New("no database pool exists for this hash") + type PoolConfig struct { MaxPoolSize int TestDBNamePrefix string - NumOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) - ForceDBReturn bool // Force returning test DB. If set to false, test databases that are 'dirty' can be recycled (in not actually used). + NumOfWorkers int // Number of cleaning workers (each hash pool runs this number of workers). + EnableDBReset bool // Enables resetting test databases with the cleanup workers. 
If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. } type PoolCollection struct { @@ -25,7 +27,7 @@ type PoolCollection struct { mutex sync.RWMutex } -// forceDBReturn set to false will allow reusing test databases that are marked as 'dirty'. +// enableDBReset set to false will allow reusing test databases that are marked as 'dirty'. // Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. func NewPoolCollection(cfg PoolConfig) *PoolCollection { return &PoolCollection{ @@ -50,12 +52,12 @@ func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc Recreate type recreateTestDBFunc func(context.Context, *existingDB) error // InitHashPool creates a new pool with a given template hash and starts the cleanup workers. -func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { +func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc, enableDBReset bool) { p.mutex.Lock() defer p.mutex.Unlock() // create a new dbHashPool - pool := newDBHashPool(p.PoolConfig, templateDB, initDBFunc) + pool := newDBHashPool(p.PoolConfig, templateDB, initDBFunc, enableDBReset) // and start the cleaning worker pool.enableWorker(p.NumOfWorkers) @@ -65,12 +67,11 @@ func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Databas // Stop is used to stop all background workers func (p *PoolCollection) Stop() { - p.mutex.Lock() - defer p.mutex.Unlock() + p.mutex.RLock() + defer p.mutex.RUnlock() for _, pool := range p.pools { - close(pool.waitingForCleaning) - pool.wg.Wait() + pool.Stop() } } @@ -80,150 +81,40 @@ func (p *PoolCollection) Stop() { // Otherwise, the obtained test DB is marked as 'dirty' and can be reused only if returned to the pool. 
func (p *PoolCollection) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { - // ! - // PoolCollection locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.RLock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - // no such pool - p.mutex.RUnlock() - err = ErrUnknownHash - return - } - - p.mutex.RUnlock() - // PoolCollection unlocked - // ! - - var index int - select { - case <-time.After(timeout): - err = ErrTimeout - return - case index = <-pool.ready: - } - - // ! - // dbHashPool locked - reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") - pool.Lock() - defer pool.Unlock() - reg.End() - - // sanity check, should never happen - if index < 0 || index >= len(pool.dbs) { - err = ErrInvalidIndex - return - } - - testDB := pool.dbs[index] - // sanity check, should never happen - we got this index from 'ready' channel - if testDB.state != dbStateReady { - err = ErrInvalidState - return - } - - testDB.state = dbStateDirty - pool.dbs[index] = testDB - - select { - case pool.dirty <- index: - // sent to dirty without blocking - default: - // channel is full + pool, err := p.getPool(ctx, hash) + if err != nil { + return db, err } - return testDB.TestDatabase, nil - // dbHashPool unlocked - // ! + return pool.GetTestDatabase(ctx, hash, timeout) } // AddTestDatabase adds a new test DB to the pool and creates it according to the template. // The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. -// If the pool size has already reached MAX, ErrPoolFull is returned, unless ForceDBReturn flag is set to false. +// If the pool size has already reached MAX, ErrPoolFull is returned, unless EnableDBReset flag is set to false. // Then databases that were given away would get reset (if no DB connection is currently open) and marked as 'Ready'. 
func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Database) error { hash := templateDB.TemplateHash - // ! - // PoolCollection locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.Lock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - p.mutex.Unlock() - return ErrUnknownHash - } - - forceReturn := p.ForceDBReturn - p.mutex.Unlock() - // PoolCollection unlocked - // ! - - newTestDB, err := pool.extend(ctx, dbStateReady, p.PoolConfig.TestDBNamePrefix) + pool, err := p.getPool(ctx, hash) if err != nil { - if errors.Is(err, ErrPoolFull) && !forceReturn { - // we can try to reset test databases that are 'dirty' - _, err := pool.resetNotReturned(ctx, p.TestDBNamePrefix, false /* shouldKeepDirty */) - return err - } - return err } - // and add its index to 'ready' - pool.ready <- newTestDB.ID - - return nil + return pool.AddTestDatabase(ctx, templateDB) } -// AddTestDatabase adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. +// ExtendPool adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. // The new test DB is marked as 'IsUse' and won't be picked up with GetTestDatabase, until it's returned to the pool. func (p *PoolCollection) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { hash := templateDB.TemplateHash - // ! - // PoolCollection locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.Lock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - // meant to be only for already initialized pools - p.mutex.Unlock() - return db.TestDatabase{}, ErrUnknownHash - } - - forceReturn := p.ForceDBReturn - p.mutex.Unlock() - // PoolCollection unlocked - // ! 
- - // because we return it right away, we treat it as 'dirty' - testDB, err := pool.extend(ctx, dbStateDirty, p.PoolConfig.TestDBNamePrefix) + pool, err := p.getPool(ctx, hash) if err != nil { - if errors.Is(err, ErrPoolFull) && !forceReturn { - // we can try to reset test databases that are 'dirty' - return pool.resetNotReturned(ctx, p.TestDBNamePrefix, true /* shouldKeepDirty */) - } - return db.TestDatabase{}, err } - select { - case pool.dirty <- testDB.ID: - // sent to dirty without blocking - default: - // channel is full - } - - return testDB, nil + return pool.ExtendPool(ctx, templateDB) } // ResetTestDatabase recreates the given test DB and returns it back to the pool. @@ -231,115 +122,36 @@ func (p *PoolCollection) ExtendPool(ctx context.Context, templateDB db.Database) // If the test DB is in a different state than 'dirty', ErrInvalidState is returned. func (p *PoolCollection) ResetTestDatabase(ctx context.Context, hash string, id int) error { - // ! - // PoolCollection locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.Lock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - // no such pool - p.mutex.Unlock() - return ErrUnknownHash - } - - // ! - // dbHashPool locked - reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") - pool.Lock() - defer pool.Unlock() - reg.End() - - p.mutex.Unlock() - // PoolCollection unlocked - // ! - - if id < 0 || id >= len(pool.dbs) { - return ErrInvalidIndex - } - - // check if db is in the correct state - testDB := pool.dbs[id] - if testDB.state == dbStateReady { - return nil - } - - if testDB.state != dbStateDirty { - return ErrInvalidState + pool, err := p.getPool(ctx, hash) + if err != nil { + return err } - testDB.state = dbStateWaitingForCleaning - pool.dbs[id] = testDB - - // add it to waitingForCleaning channel, to have it cleaned up by the worker - pool.waitingForCleaning <- id - - return nil - // dbHashPool unlocked - // ! 
+ return pool.ResetTestDatabase(ctx, hash, id) } // ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). // If the test DB is in a different state than 'dirty', ErrInvalidState is returned. func (p *PoolCollection) ReturnTestDatabase(ctx context.Context, hash string, id int) error { - // ! - // PoolCollection locked - reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") - p.mutex.Lock() - reg.End() - pool := p.pools[hash] - - if pool == nil { - // no such pool - p.mutex.Unlock() - return ErrUnknownHash - } - p.mutex.Unlock() - - pool.Lock() - defer pool.Unlock() - - if id < 0 || id >= len(pool.dbs) { - return ErrInvalidIndex - } - - // check if db is in the correct state - testDB := pool.dbs[id] - if testDB.state == dbStateReady { - return nil - } - - // if not in use, it will be cleaned up by a worker - if testDB.state != dbStateDirty { - return ErrInvalidState + pool, err := p.getPool(ctx, hash) + if err != nil { + return err } - testDB.state = dbStateReady - pool.dbs[id] = testDB - - pool.ready <- id - - return nil + return pool.ReturnTestDatabase(ctx, hash, id) } // RemoveAllWithHash removes a pool with a given template hash. // All background workers belonging to this pool are stopped. func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, removeFunc RemoveDBFunc) error { + pool, collUnlock, err := p.getPoolLockCollection(ctx, hash) + defer collUnlock() - // ! - // PoolCollection locked - p.mutex.Lock() - defer p.mutex.Unlock() - - pool := p.pools[hash] - - if pool == nil { - // no such pool - return ErrUnknownHash + if err != nil { + return err } - if err := pool.removeAll(ctx, removeFunc); err != nil { + if err := pool.RemoveAll(ctx, removeFunc); err != nil { return err } @@ -347,19 +159,15 @@ func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, rem delete(p.pools, hash) return nil - // PoolCollection unlocked - // ! } // RemoveAll removes all tracked pools. 
func (p *PoolCollection) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { - // ! - // PoolCollection locked p.mutex.Lock() defer p.mutex.Unlock() for hash, pool := range p.pools { - if err := pool.removeAll(ctx, removeFunc); err != nil { + if err := pool.RemoveAll(ctx, removeFunc); err != nil { return err } @@ -367,8 +175,6 @@ func (p *PoolCollection) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) } return nil - // PoolCollection unlocked - // ! } // MakeDBName makes a test DB name with the configured prefix, template hash and ID of the DB. @@ -383,3 +189,33 @@ func makeDBName(testDBPrefix string, hash string, id int) string { // db name has an ID in suffix return fmt.Sprintf("%s%s_%03d", testDBPrefix, hash, id) } + +func (p *PoolCollection) getPool(ctx context.Context, hash string) (pool *dbHashPool, err error) { + reg := trace.StartRegion(ctx, "wait_for_rlock_main_pool") + p.mutex.RLock() + defer p.mutex.RUnlock() + reg.End() + + pool, ok := p.pools[hash] + if !ok { + // no such pool + return nil, ErrUnknownHash + } + + return pool, nil +} + +func (p *PoolCollection) getPoolLockCollection(ctx context.Context, hash string) (pool *dbHashPool, unlock func(), err error) { + reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") + p.mutex.Lock() + unlock = func() { p.mutex.Unlock() } + reg.End() + + pool, ok := p.pools[hash] + if !ok { + // no such pool + err = ErrUnknownHash + } + + return pool, unlock, err +} diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index 01bb2f2..c3ebfac 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -19,7 +19,7 @@ func TestPoolAddGet(t *testing.T) { MaxPoolSize: 2, NumOfWorkers: 4, TestDBNamePrefix: "prefix_", - ForceDBReturn: true, + EnableDBReset: true, } p := pool.NewPoolCollection(cfg) @@ -36,7 +36,7 @@ func TestPoolAddGet(t *testing.T) { t.Log("(re)create ", testDB.Database) return nil } - p.InitHashPool(ctx, templateDB, initFunc) + 
p.InitHashPool(ctx, templateDB, initFunc, true /*enableDBReset*/) // get from empty _, err := p.GetTestDatabase(ctx, hash1, 0) @@ -53,7 +53,7 @@ func TestPoolAddGet(t *testing.T) { // add for h2 templateDB2 := templateDB templateDB2.TemplateHash = hash2 - p.InitHashPool(ctx, templateDB2, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBReset*/) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) assert.ErrorIs(t, p.AddTestDatabase(ctx, templateDB2), pool.ErrPoolFull) @@ -95,7 +95,7 @@ func TestPoolAddGetConcurrent(t *testing.T) { MaxPoolSize: 6, NumOfWorkers: 4, TestDBNamePrefix: "", - ForceDBReturn: true, + EnableDBReset: true, } p := pool.NewPoolCollection(cfg) @@ -103,8 +103,8 @@ func TestPoolAddGetConcurrent(t *testing.T) { sleepDuration := 100 * time.Millisecond // initialize hash pool - p.InitHashPool(ctx, templateDB1, initFunc) - p.InitHashPool(ctx, templateDB2, initFunc) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBReset*/) // add DB in one goroutine wg.Add(1) @@ -166,11 +166,11 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { MaxPoolSize: 6, NumOfWorkers: 4, TestDBNamePrefix: "", - ForceDBReturn: true, + EnableDBReset: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc) - p.InitHashPool(ctx, templateDB2, initFunc) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBReset*/) var wg sync.WaitGroup @@ -226,11 +226,11 @@ func TestPoolRemoveAll(t *testing.T) { MaxPoolSize: 6, NumOfWorkers: 4, TestDBNamePrefix: "", - ForceDBReturn: true, + EnableDBReset: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc) - p.InitHashPool(ctx, templateDB2, initFunc) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB2, 
initFunc, true /*enableDBReset*/) // add DBs sequentially for i := 0; i < cfg.MaxPoolSize; i++ { @@ -248,7 +248,7 @@ func TestPoolRemoveAll(t *testing.T) { assert.Error(t, err, pool.ErrTimeout) // start using pool again - p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) testDB, err := p.GetTestDatabase(ctx, hash1, 0) assert.NoError(t, err) @@ -275,10 +275,10 @@ func TestPoolInit(t *testing.T) { MaxPoolSize: 100, NumOfWorkers: 150, TestDBNamePrefix: "", - ForceDBReturn: true, + EnableDBReset: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) // we will test 2 ways of adding new DBs for i := 0; i < cfg.MaxPoolSize/2; i++ { @@ -347,10 +347,10 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { MaxPoolSize: 40, NumOfWorkers: 1, TestDBNamePrefix: "test_", - ForceDBReturn: false, + EnableDBReset: false, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB1, initFunc, false /*enableDBReset*/) for i := 0; i < cfg.MaxPoolSize; i++ { // add and get freshly added DB @@ -411,10 +411,10 @@ func TestPoolReturnTestDatabase(t *testing.T) { cfg := pool.PoolConfig{ MaxPoolSize: 40, NumOfWorkers: 3, - ForceDBReturn: true, + EnableDBReset: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) for i := 0; i < cfg.MaxPoolSize; i++ { testDB, err := p.ExtendPool(ctx, templateDB1) @@ -457,10 +457,10 @@ func TestPoolResetTestDatabase(t *testing.T) { cfg := pool.PoolConfig{ MaxPoolSize: 40, NumOfWorkers: 3, - ForceDBReturn: true, + EnableDBReset: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) for i := 
0; i < cfg.MaxPoolSize; i++ { testDB, err := p.ExtendPool(ctx, templateDB1) From 6e78810266f7c35bd6b1301657556bf83d36137d Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 11:30:45 +0000 Subject: [PATCH 120/160] refactor, rename dbHashPool to HashPool --- pkg/manager/manager.go | 7 +++- pkg/manager/manager_test.go | 63 +++++++++++++++++++++++++++++++-- pkg/pool/pool.go | 70 +++++++++++++++++++++---------------- pkg/pool/pool_collection.go | 20 ++++++----- 4 files changed, 119 insertions(+), 41 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 9579d6e..522eac1 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -186,6 +186,11 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, e return db.TemplateDatabase{}, ErrManagerNotReady } + if !m.config.TestDatabaseEnableReset { + // only if the main config allows for DB reset, it can be enabled + enableDBReset = false + } + dbName := m.makeTemplateDatabaseName(hash) templateConfig := templates.TemplateConfig{ DatabaseConfig: db.DatabaseConfig{ @@ -351,7 +356,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return db.TestDatabase{}, err } - if !m.config.TestDatabaseEnableReset { + if !template.IsResetEnabled(ctx) { // before returning create a new test database in background m.wg.Add(1) go func(ctx context.Context, templ *templates.Template) { diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 6d85592..74d3748 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -876,11 +876,10 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { } for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { + // assert that test db can be get again testDB, err := m.GetTestDatabase(ctx, hash) assert.NoError(t, err) - // assert that test db can be get again - // and that it has been cleaned up db, err := sql.Open("postgres", testDB.Config.ConnectionString()) 
require.NoError(t, err) require.NoError(t, db.PingContext(ctx)) @@ -893,6 +892,66 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { } } +func TestManagerResetTestDatabaseRecreateDisabled(t *testing.T) { + ctx := context.Background() + + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TestDatabaseInitialPoolSize = 5 + cfg.NumOfCleaningWorkers = 2 + cfg.TestDatabaseMaxPoolSize = 10 + cfg.TestDatabaseEnableReset = true + cfg.TestDatabaseGetTimeout = 200 * time.Millisecond + + m, _ := testManagerWithConfig(cfg) + + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } + + defer disconnectManager(t, m) + + hash := "hashinghash" + + template, err := m.InitializeTemplateDatabase(ctx, hash, false /*enableReset*/) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) + } + + populateTemplateDB(t, template) + + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + + testDB, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + + // open the connection and modify the test DB + db, err := sql.Open("postgres", testDB.Config.ConnectionString()) + require.NoError(t, err) + require.NoError(t, db.PingContext(ctx)) + + _, err = db.ExecContext(ctx, `INSERT INTO pilots (id, "name", created_at, updated_at) VALUES ('777a1a87-5ef7-4309-8814-0f1054751177', 'Snufkin', '2023-07-13 09:44:00.548', '2023-07-13 09:44:00.548')`) + assert.NoError(t, err, testDB.ID) + db.Close() + + // assert.NoError(t, m.ResetTestDatabase(ctx, hash, testDB.ID)) + + time.Sleep(100 * time.Millisecond) // sleep sufficient time to recreate the db by a worker (which should not happen) + + db, err = sql.Open("postgres", testDB.Config.ConnectionString()) + require.NoError(t, err) + require.NoError(t, db.PingContext(ctx)) + + // assert that the data is still there, even after ResetTestDatabase is called + row := db.QueryRowContext(ctx, "SELECT name FROM 
pilots WHERE id = '777a1a87-5ef7-4309-8814-0f1054751177'") + assert.NoError(t, row.Err()) + var name string + assert.NoError(t, row.Scan(&name)) + assert.Equal(t, "Snufkin", name) + db.Close() +} + func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { ctx := context.Background() diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 0d9834f..ef5ee35 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -34,8 +34,8 @@ type existingDB struct { db.TestDatabase } -// dbHashPool holds a test DB pool for a certain hash. Each dbHashPool is running cleanup workers in background. -type dbHashPool struct { +// HashPool holds a test DB pool for a certain hash. Each HashPool is running cleanup workers in background. +type HashPool struct { dbs []existingDB ready chan int // ID of initalized DBs according to a template, ready to pick them up waitingForCleaning chan int // ID of returned DBs, need to be recreated to reuse them @@ -49,28 +49,34 @@ type dbHashPool struct { wg sync.WaitGroup } -// newDBHashPool creates new hash pool with the given config. enableDBReset overwrites the config given in PoolConfig parameter. -func newDBHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc, enableDBReset bool) *dbHashPool { +// NewHashPool creates new hash pool with the given config. +// If EnableDBReset is true, cleanup workers start automatically. 
+func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *HashPool { - cfg.EnableDBReset = enableDBReset - - return &dbHashPool{ + pool := &HashPool{ dbs: make([]existingDB, 0, cfg.MaxPoolSize), ready: make(chan int, cfg.MaxPoolSize), waitingForCleaning: make(chan int, cfg.MaxPoolSize), dirty: make(chan int, 3*cfg.MaxPoolSize), // here indexes can be duplicated - recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), - templateDB: templateDB, - PoolConfig: cfg, + + recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), + templateDB: templateDB, + PoolConfig: cfg, } + + if pool.EnableDBReset { + pool.enableWorkers() + } + + return pool } -func (pool *dbHashPool) Stop() { +func (pool *HashPool) Stop() { close(pool.waitingForCleaning) pool.wg.Wait() } -func (pool *dbHashPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { +func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { var index int select { case <-time.After(timeout): @@ -111,7 +117,7 @@ func (pool *dbHashPool) GetTestDatabase(ctx context.Context, hash string, timeou } -func (pool *dbHashPool) AddTestDatabase(ctx context.Context, templateDB db.Database) error { +func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB db.Database) error { newTestDB, err := pool.extend(ctx, dbStateReady) if err != nil { @@ -130,7 +136,7 @@ func (pool *dbHashPool) AddTestDatabase(ctx context.Context, templateDB db.Datab return nil } -func (pool *dbHashPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { +func (pool *HashPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { // because we return it right away, we treat it as 'dirty' testDB, err := pool.extend(ctx, dbStateDirty) @@ -153,7 +159,7 @@ func (pool *dbHashPool) ExtendPool(ctx 
context.Context, templateDB db.Database) return testDB, nil } -func (pool *dbHashPool) ResetTestDatabase(ctx context.Context, hash string, id int) error { +func (pool *HashPool) ResetTestDatabase(ctx context.Context, hash string, id int) error { reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() @@ -183,7 +189,7 @@ func (pool *dbHashPool) ResetTestDatabase(ctx context.Context, hash string, id i } -func (pool *dbHashPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { +func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { pool.Lock() defer pool.Unlock() @@ -211,19 +217,23 @@ func (pool *dbHashPool) ReturnTestDatabase(ctx context.Context, hash string, id } -func (pool *dbHashPool) enableWorker(numberOfWorkers int) { - for i := 0; i < numberOfWorkers; i++ { +func (pool *HashPool) enableWorkers() { + if !pool.EnableDBReset { + return + } + + for i := 0; i < pool.NumOfWorkers; i++ { pool.wg.Add(1) go func() { defer pool.wg.Done() - pool.workerCleanUpReturnedDB() + pool.workerCleanUpTask() }() } } -// workerCleanUpReturnedDB reads 'waitingForCleaning' channel and cleans up a test DB with the received index. +// workerCleanUpTask reads 'waitingForCleaning' channel and cleans up a test DB with the received index. // When the DB is recreated according to a template, its index goes to the 'ready' channel. -func (pool *dbHashPool) workerCleanUpReturnedDB() { +func (pool *HashPool) workerCleanUpTask() { for id := range pool.waitingForCleaning { if id == stopWorkerMessage { @@ -275,9 +285,9 @@ func (pool *dbHashPool) workerCleanUpReturnedDB() { } } -func (pool *dbHashPool) extend(ctx context.Context, state dbState) (db.TestDatabase, error) { +func (pool *HashPool) extend(ctx context.Context, state dbState) (db.TestDatabase, error) { // ! 
- // dbHashPool locked + // HashPool locked reg := trace.StartRegion(ctx, "extend_wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() @@ -312,14 +322,14 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState) (db.TestDatab pool.dbs = append(pool.dbs, newTestDB) return newTestDB.TestDatabase, nil - // dbHashPool unlocked + // HashPool unlocked // ! } // resetNotReturned recreates one DB that is 'dirty' and to which no db clients are connected (so it can be dropped). // If shouldKeepDirty is set to true, the DB state remains 'dirty'. Otherwise, it is marked as 'Ready' // and can be obtained again with GetTestDatabase request - in such case error is nil but returned db.TestDatabase is empty. -func (pool *dbHashPool) resetNotReturned(ctx context.Context, shouldKeepDirty bool) (db.TestDatabase, error) { +func (pool *HashPool) resetNotReturned(ctx context.Context, shouldKeepDirty bool) (db.TestDatabase, error) { var testDB existingDB var index int found := false @@ -337,7 +347,7 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, shouldKeepDirty bo } // ! - // dbHashPool locked + // HashPool locked reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() reg.End() @@ -398,11 +408,11 @@ func (pool *dbHashPool) resetNotReturned(ctx context.Context, shouldKeepDirty bo pool.ready <- index return db.TestDatabase{}, nil - // dbHashPool unlocked + // HashPool unlocked // ! } -func (pool *dbHashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { +func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { // stop the worker // we don't close here because if the remove operation fails, we want to be able to repeat it @@ -412,7 +422,7 @@ func (pool *dbHashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) pool.wg.Wait() // ! 
- // dbHashPool locked + // HashPool locked pool.Lock() defer pool.Unlock() @@ -438,6 +448,6 @@ func (pool *dbHashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) close(pool.waitingForCleaning) return nil - // dbHashPool unlocked + // HashPool unlocked // ! } diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index de54108..94bebf7 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -23,7 +23,7 @@ type PoolConfig struct { type PoolCollection struct { PoolConfig - pools map[string]*dbHashPool // map[hash] + pools map[string]*HashPool // map[hash] mutex sync.RWMutex } @@ -31,7 +31,7 @@ type PoolCollection struct { // Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. func NewPoolCollection(cfg PoolConfig) *PoolCollection { return &PoolCollection{ - pools: make(map[string]*dbHashPool), + pools: make(map[string]*HashPool), PoolConfig: cfg, } } @@ -56,10 +56,14 @@ func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Databas p.mutex.Lock() defer p.mutex.Unlock() - // create a new dbHashPool - pool := newDBHashPool(p.PoolConfig, templateDB, initDBFunc, enableDBReset) - // and start the cleaning worker - pool.enableWorker(p.NumOfWorkers) + cfg := p.PoolConfig + if p.EnableDBReset { + // only if the main config allows for DB reset, it can be enabled + cfg.EnableDBReset = enableDBReset + } + + // Create a new HashPool. If resetting is enabled, workers start automatically. 
+ pool := NewHashPool(cfg, templateDB, initDBFunc) // pool is ready p.pools[pool.templateDB.TemplateHash] = pool @@ -190,7 +194,7 @@ func makeDBName(testDBPrefix string, hash string, id int) string { return fmt.Sprintf("%s%s_%03d", testDBPrefix, hash, id) } -func (p *PoolCollection) getPool(ctx context.Context, hash string) (pool *dbHashPool, err error) { +func (p *PoolCollection) getPool(ctx context.Context, hash string) (pool *HashPool, err error) { reg := trace.StartRegion(ctx, "wait_for_rlock_main_pool") p.mutex.RLock() defer p.mutex.RUnlock() @@ -205,7 +209,7 @@ func (p *PoolCollection) getPool(ctx context.Context, hash string) (pool *dbHash return pool, nil } -func (p *PoolCollection) getPoolLockCollection(ctx context.Context, hash string) (pool *dbHashPool, unlock func(), err error) { +func (p *PoolCollection) getPoolLockCollection(ctx context.Context, hash string) (pool *HashPool, unlock func(), err error) { reg := trace.StartRegion(ctx, "wait_for_lock_main_pool") p.mutex.Lock() unlock = func() { p.mutex.Unlock() } From 90f35b7db5f4d41e63120a3a6f7ae3296dd42192 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 12:53:21 +0000 Subject: [PATCH 121/160] rename /reset to /recreate --- internal/api/templates/routes.go | 2 +- internal/api/templates/templates.go | 4 +- pkg/manager/helpers_test.go | 6 +-- pkg/manager/manager.go | 30 +++++------ pkg/manager/manager_config.go | 4 +- pkg/manager/manager_test.go | 64 +++++++++++------------ pkg/pool/pool.go | 24 ++++----- pkg/pool/pool_collection.go | 24 ++++----- pkg/pool/pool_collection_test.go | 56 ++++++++++---------- pkg/templates/template.go | 8 +-- pkg/templates/template_collection_test.go | 6 +-- 11 files changed, 114 insertions(+), 114 deletions(-) diff --git a/internal/api/templates/routes.go b/internal/api/templates/routes.go index 33a26c5..2ede71f 100644 --- a/internal/api/templates/routes.go +++ b/internal/api/templates/routes.go @@ -11,7 +11,7 @@ func InitRoutes(s *api.Server) { 
g.GET("/:hash/tests", getTestDatabase(s)) g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s)) // deprecated, use POST /unlock instead - g.POST("/:hash/tests/:id/reset", postResetTestDatabase(s)) + g.POST("/:hash/tests/:id/recreate", postRecreateTestDatabase(s)) g.POST("/:hash/tests/:id/unlock", postUnlockTestDatabase(s)) } diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 5025169..85984e2 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -126,7 +126,7 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return postUnlockTestDatabase(s) } -func postResetTestDatabase(s *api.Server) echo.HandlerFunc { +func postRecreateTestDatabase(s *api.Server) echo.HandlerFunc { return func(c echo.Context) error { hash := c.Param("hash") id, err := strconv.Atoi(c.Param("id")) @@ -134,7 +134,7 @@ func postResetTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID") } - if err := s.Manager.ResetTestDatabase(c.Request().Context(), hash, id); err != nil { + if err := s.Manager.RecreateTestDatabase(c.Request().Context(), hash, id); err != nil { switch err { case manager.ErrManagerNotReady: return echo.ErrServiceUnavailable diff --git a/pkg/manager/helpers_test.go b/pkg/manager/helpers_test.go index fa317f7..5ca8c04 100644 --- a/pkg/manager/helpers_test.go +++ b/pkg/manager/helpers_test.go @@ -51,12 +51,12 @@ func disconnectManager(t *testing.T, m *manager.Manager) { } -func initTemplateDB(ctx context.Context, errs chan<- error, m *manager.Manager, enableDBReset ...bool) { +func initTemplateDB(ctx context.Context, errs chan<- error, m *manager.Manager, enableDBRecreate ...bool) { // true by default - enableDBResetFlag := !(len(enableDBReset) > 0 && !enableDBReset[0]) + enableDBRecreateFlag := !(len(enableDBRecreate) > 0 && !enableDBRecreate[0]) - template, err := m.InitializeTemplateDatabase(context.Background(), 
"hashinghash", enableDBResetFlag) + template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash", enableDBRecreateFlag) if err != nil { errs <- err return diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 522eac1..9c9fd9e 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -58,7 +58,7 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { MaxPoolSize: config.TestDatabaseMaxPoolSize, TestDBNamePrefix: testDBPrefix, NumOfWorkers: config.NumOfCleaningWorkers, - EnableDBReset: config.TestDatabaseEnableReset, + EnableDBRecreate: config.TestDatabaseEnableRecreate, }, ), connectionCtx: context.TODO(), @@ -178,7 +178,7 @@ func (m *Manager) Initialize(ctx context.Context) error { return nil } -func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, enableDBReset bool) (db.TemplateDatabase, error) { +func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, enableDBRecreate bool) (db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "initialize_template_db") defer task.End() @@ -186,9 +186,9 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, e return db.TemplateDatabase{}, ErrManagerNotReady } - if !m.config.TestDatabaseEnableReset { - // only if the main config allows for DB reset, it can be enabled - enableDBReset = false + if !m.config.TestDatabaseEnableRecreate { + // only if the main config allows for DB recreate, it can be enabled + enableDBRecreate = false } dbName := m.makeTemplateDatabaseName(hash) @@ -200,7 +200,7 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, e Password: m.config.ManagerDatabaseConfig.Password, Database: dbName, }, - ResetEnabled: enableDBReset, + RecreateEnabled: enableDBRecreate, } added, unlock := m.templates.Push(ctx, hash, templateConfig) @@ -299,7 +299,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db } // Init a pool with this 
hash - m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.ResetEnabled) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.RecreateEnabled) lockedTemplate.SetState(ctx, templates.TemplateStateFinalized) m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) @@ -343,7 +343,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData // Template exists, but the pool is not there - // it must have been removed. // It needs to be reinitialized. - m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.IsResetEnabled(ctx)) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.IsRecreateEnabled(ctx)) // pool initalized, create one test db testDB, err = m.pool.ExtendPool(ctx, template.Database) @@ -356,7 +356,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return db.TestDatabase{}, err } - if !template.IsResetEnabled(ctx) { + if !template.IsRecreateEnabled(ctx) { // before returning create a new test database in background m.wg.Add(1) go func(ctx context.Context, templ *templates.Template) { @@ -407,9 +407,9 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e return nil } -// ResetTestDatabase recreates the test DB according to the template and returns it back to the pool. -func (m *Manager) ResetTestDatabase(ctx context.Context, hash string, id int) error { - ctx, task := trace.NewTask(ctx, "reset_test_db") +// RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. 
+func (m *Manager) RecreateTestDatabase(ctx context.Context, hash string, id int) error { + ctx, task := trace.NewTask(ctx, "recreate_test_db") defer task.End() if !m.Ready() { @@ -422,8 +422,8 @@ func (m *Manager) ResetTestDatabase(ctx context.Context, hash string, id int) er return m.dropDatabaseWithID(ctx, hash, id) } - // don't allow to reset if it's not enabled for this template - if !template.IsResetEnabled(ctx) { + // don't allow to recreate if it's not enabled for this template + if !template.IsRecreateEnabled(ctx) { return nil } @@ -434,7 +434,7 @@ func (m *Manager) ResetTestDatabase(ctx context.Context, hash string, id int) er } // template is ready, we can returb the testDB to the pool and have it cleaned up - if err := m.pool.ResetTestDatabase(ctx, hash, id); err != nil { + if err := m.pool.RecreateTestDatabase(ctx, hash, id); err != nil { if !(errors.Is(err, pool.ErrInvalidIndex) || errors.Is(err, pool.ErrUnknownHash)) { // other error is an internal error diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index a218a48..2ffce94 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -21,7 +21,7 @@ type ManagerConfig struct { TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool NumOfCleaningWorkers int // Number of pool workers cleaning up dirty DBs - TestDatabaseEnableReset bool // Enables resetting test databases with the cleanup workers. If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. + TestDatabaseEnableRecreate bool // Enables recreating test databases with the cleanup workers. If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. 
} func DefaultManagerConfigFromEnv() ManagerConfig { @@ -61,6 +61,6 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 20000)), TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", 3), - TestDatabaseEnableReset: util.GetEnvAsBool("INTEGRESQL_TEST_DB_ENABLE_RESET", false), + TestDatabaseEnableRecreate: util.GetEnvAsBool("INTEGRESQL_TEST_DB_ENABLE_RECREATE", false), } } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 74d3748..6af690d 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -106,7 +106,7 @@ func TestManagerInitializeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -128,7 +128,7 @@ func TestManagerInitializeTemplateDatabaseTimeout(t *testing.T) { ctxt, cancel := context.WithTimeout(ctx, 10*time.Nanosecond) defer cancel() - _, err := m.InitializeTemplateDatabase(ctxt, hash, true /* enableDBReset */) + _, err := m.InitializeTemplateDatabase(ctxt, hash, true /* enableDBRecreate */) if err != context.DeadlineExceeded { t.Fatalf("received unexpected error, got %v, want %v", err, context.DeadlineExceeded) } @@ -204,7 +204,7 @@ func TestManagerFinalizeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -286,7 +286,7 @@ func 
TestManagerGetTestDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -322,7 +322,7 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -354,7 +354,7 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -416,7 +416,7 @@ func TestManagerGetTestDatabaseConcurrently(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -482,7 +482,7 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -550,7 +550,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + template, err := 
m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -606,7 +606,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { t.Fatalf("finalize template should not work: %v", err) } - _, err = m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + _, err = m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("reinitialize after discard template database should work: %v", err) } @@ -623,7 +623,7 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 3 cfg.TestDatabaseMaxPoolSize = 3 - cfg.TestDatabaseEnableReset = true + cfg.TestDatabaseEnableRecreate = true cfg.TestDatabaseGetTimeout = 200 * time.Millisecond m, _ := testManagerWithConfig(cfg) @@ -635,7 +635,7 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -670,7 +670,7 @@ func TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { cfg.TestDatabaseMaxPoolSize = 10 cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond // force DB return - cfg.TestDatabaseEnableReset = true + cfg.TestDatabaseEnableRecreate = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -681,7 +681,7 @@ func TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -724,7 
+724,7 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { cfg.TestDatabaseInitialPoolSize = 5 cfg.TestDatabaseMaxPoolSize = 5 // enable reusing old not returned databases - cfg.TestDatabaseEnableReset = false + cfg.TestDatabaseEnableRecreate = false m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -735,7 +735,7 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -788,14 +788,14 @@ func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { } } -func TestManagerReturnResetTestDatabase(t *testing.T) { +func TestManagerReturnRecreateTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 10 cfg.NumOfCleaningWorkers = 2 cfg.TestDatabaseMaxPoolSize = 10 - cfg.TestDatabaseEnableReset = true + cfg.TestDatabaseEnableRecreate = true cfg.TestDatabaseGetTimeout = 200 * time.Millisecond tests := []struct { @@ -804,9 +804,9 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { resultCheck func(row *sql.Row, id int) }{ { - name: "Reset", + name: "Recreate", giveBackFunc: func(m *manager.Manager, ctx context.Context, hash string, id int) error { - return m.ResetTestDatabase(ctx, hash, id) + return m.RecreateTestDatabase(ctx, hash, id) }, resultCheck: func(row *sql.Row, id int) { assert.NoError(t, row.Err()) @@ -842,7 +842,7 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("failed to initialize 
template database: %v", err) } @@ -870,7 +870,7 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { _, err = m.GetTestDatabase(ctx, hash) assert.ErrorIs(t, err, pool.ErrPoolFull) - // reset or return test database + // recreate or return test database for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { assert.NoError(t, tt.giveBackFunc(m, ctx, hash, i), i) } @@ -892,14 +892,14 @@ func TestManagerReturnResetTestDatabase(t *testing.T) { } } -func TestManagerResetTestDatabaseRecreateDisabled(t *testing.T) { +func TestManagerRecreateTestDatabaseRecreateDisabled(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 5 cfg.NumOfCleaningWorkers = 2 cfg.TestDatabaseMaxPoolSize = 10 - cfg.TestDatabaseEnableReset = true + cfg.TestDatabaseEnableRecreate = true cfg.TestDatabaseGetTimeout = 200 * time.Millisecond m, _ := testManagerWithConfig(cfg) @@ -912,7 +912,7 @@ func TestManagerResetTestDatabaseRecreateDisabled(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, false /*enableReset*/) + template, err := m.InitializeTemplateDatabase(ctx, hash, false /*enableRecreate*/) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -935,7 +935,7 @@ func TestManagerResetTestDatabaseRecreateDisabled(t *testing.T) { assert.NoError(t, err, testDB.ID) db.Close() - // assert.NoError(t, m.ResetTestDatabase(ctx, hash, testDB.ID)) + // assert.NoError(t, m.RecreateTestDatabase(ctx, hash, testDB.ID)) time.Sleep(100 * time.Millisecond) // sleep sufficient time to recreate the db by a worker (which should not happen) @@ -943,7 +943,7 @@ func TestManagerResetTestDatabaseRecreateDisabled(t *testing.T) { require.NoError(t, err) require.NoError(t, db.PingContext(ctx)) - // assert that the data is still there, even after ResetTestDatabase is called + // assert that the data is still there, even after RecreateTestDatabase is called row := 
db.QueryRowContext(ctx, "SELECT name FROM pilots WHERE id = '777a1a87-5ef7-4309-8814-0f1054751177'") assert.NoError(t, row.Err()) var name string @@ -964,7 +964,7 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1012,7 +1012,7 @@ func TestManagerReturnUnknownTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1042,7 +1042,7 @@ func TestManagerMultiFinalize(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBReset */) + template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1090,7 +1090,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() // there are no db added in background cfg.TestDatabaseInitialPoolSize = 0 - cfg.TestDatabaseEnableReset = true + cfg.TestDatabaseEnableRecreate = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -1101,7 +1101,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableReset) + template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index ef5ee35..009f870 100644 --- 
a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -50,7 +50,7 @@ type HashPool struct { } // NewHashPool creates new hash pool with the given config. -// If EnableDBReset is true, cleanup workers start automatically. +// If EnableDBRecreate is true, cleanup workers start automatically. func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *HashPool { pool := &HashPool{ @@ -64,7 +64,7 @@ func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFu PoolConfig: cfg, } - if pool.EnableDBReset { + if pool.EnableDBRecreate { pool.enableWorkers() } @@ -121,9 +121,9 @@ func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB db.Databas newTestDB, err := pool.extend(ctx, dbStateReady) if err != nil { - if errors.Is(err, ErrPoolFull) && !pool.EnableDBReset { - // we can try to reset test databases that are 'dirty' - _, err := pool.resetNotReturned(ctx, false /* shouldKeepDirty */) + if errors.Is(err, ErrPoolFull) && !pool.EnableDBRecreate { + // we can try to recreate test databases that are 'dirty' + _, err := pool.recreateDirtyDB(ctx, false /* shouldKeepDirty */) return err } @@ -141,9 +141,9 @@ func (pool *HashPool) ExtendPool(ctx context.Context, templateDB db.Database) (d // because we return it right away, we treat it as 'dirty' testDB, err := pool.extend(ctx, dbStateDirty) if err != nil { - if errors.Is(err, ErrPoolFull) && !pool.EnableDBReset { - // we can try to reset test databases that are 'dirty' - return pool.resetNotReturned(ctx, true /* shouldKeepDirty */) + if errors.Is(err, ErrPoolFull) && !pool.EnableDBRecreate { + // we can try to recreate test databases that are 'dirty' + return pool.recreateDirtyDB(ctx, true /* shouldKeepDirty */) } return db.TestDatabase{}, err @@ -159,7 +159,7 @@ func (pool *HashPool) ExtendPool(ctx context.Context, templateDB db.Database) (d return testDB, nil } -func (pool *HashPool) ResetTestDatabase(ctx context.Context, hash string, id int) error { +func (pool *HashPool) 
RecreateTestDatabase(ctx context.Context, hash string, id int) error { reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() @@ -218,7 +218,7 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id in } func (pool *HashPool) enableWorkers() { - if !pool.EnableDBReset { + if !pool.EnableDBRecreate { return } @@ -326,10 +326,10 @@ func (pool *HashPool) extend(ctx context.Context, state dbState) (db.TestDatabas // ! } -// resetNotReturned recreates one DB that is 'dirty' and to which no db clients are connected (so it can be dropped). +// recreateDirtyDB recreates one DB that is 'dirty' and to which no db clients are connected (so it can be dropped). // If shouldKeepDirty is set to true, the DB state remains 'dirty'. Otherwise, it is marked as 'Ready' // and can be obtained again with GetTestDatabase request - in such case error is nil but returned db.TestDatabase is empty. -func (pool *HashPool) resetNotReturned(ctx context.Context, shouldKeepDirty bool) (db.TestDatabase, error) { +func (pool *HashPool) recreateDirtyDB(ctx context.Context, shouldKeepDirty bool) (db.TestDatabase, error) { var testDB existingDB var index int found := false diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 94bebf7..f68535d 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -17,7 +17,7 @@ type PoolConfig struct { MaxPoolSize int TestDBNamePrefix string NumOfWorkers int // Number of cleaning workers (each hash pool runs this number of workers). - EnableDBReset bool // Enables resetting test databases with the cleanup workers. If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. + EnableDBRecreate bool // Enables recreating test databases with the cleanup workers. If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. 
} type PoolCollection struct { @@ -27,7 +27,7 @@ type PoolCollection struct { mutex sync.RWMutex } -// enableDBReset set to false will allow reusing test databases that are marked as 'dirty'. +// enableDBRecreate set to false will allow reusing test databases that are marked as 'dirty'. // Otherwise, test DB has to be returned when no longer needed and there are higher chances of getting ErrPoolFull when requesting a new DB. func NewPoolCollection(cfg PoolConfig) *PoolCollection { return &PoolCollection{ @@ -52,17 +52,17 @@ func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc Recreate type recreateTestDBFunc func(context.Context, *existingDB) error // InitHashPool creates a new pool with a given template hash and starts the cleanup workers. -func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc, enableDBReset bool) { +func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc, enableDBRecreate bool) { p.mutex.Lock() defer p.mutex.Unlock() cfg := p.PoolConfig - if p.EnableDBReset { - // only if the main config allows for DB reset, it can be enabled - cfg.EnableDBReset = enableDBReset + if p.EnableDBRecreate { + // only if the main config allows for DB recreate, it can be enabled + cfg.EnableDBRecreate = enableDBRecreate } - // Create a new HashPool. If resetting is enabled, workers start automatically. + // Create a new HashPool. If recreating is enabled, workers start automatically. pool := NewHashPool(cfg, templateDB, initDBFunc) // pool is ready @@ -95,8 +95,8 @@ func (p *PoolCollection) GetTestDatabase(ctx context.Context, hash string, timeo // AddTestDatabase adds a new test DB to the pool and creates it according to the template. // The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. -// If the pool size has already reached MAX, ErrPoolFull is returned, unless EnableDBReset flag is set to false. 
-// Then databases that were given away would get reset (if no DB connection is currently open) and marked as 'Ready'. +// If the pool size has already reached MAX, ErrPoolFull is returned, unless EnableDBRecreate flag is set to false. +// Then databases that were given away would get recreated (if no DB connection is currently open) and marked as 'Ready'. func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Database) error { hash := templateDB.TemplateHash @@ -121,17 +121,17 @@ func (p *PoolCollection) ExtendPool(ctx context.Context, templateDB db.Database) return pool.ExtendPool(ctx, templateDB) } -// ResetTestDatabase recreates the given test DB and returns it back to the pool. +// RecreateTestDatabase recreates the given test DB and returns it back to the pool. // To have it recreated, it is added to 'waitingForCleaning' channel. // If the test DB is in a different state than 'dirty', ErrInvalidState is returned. -func (p *PoolCollection) ResetTestDatabase(ctx context.Context, hash string, id int) error { pool, err := p.getPool(ctx, hash) if err != nil { return err } - return pool.ResetTestDatabase(ctx, hash, id) + return pool.RecreateTestDatabase(ctx, hash, id) } // ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). 
diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index c3ebfac..5e2f2f0 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -19,7 +19,7 @@ func TestPoolAddGet(t *testing.T) { MaxPoolSize: 2, NumOfWorkers: 4, TestDBNamePrefix: "prefix_", - EnableDBReset: true, + EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) @@ -36,7 +36,7 @@ func TestPoolAddGet(t *testing.T) { t.Log("(re)create ", testDB.Database) return nil } - p.InitHashPool(ctx, templateDB, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB, initFunc, true /*enableDBRecreate*/) // get from empty _, err := p.GetTestDatabase(ctx, hash1, 0) @@ -53,7 +53,7 @@ func TestPoolAddGet(t *testing.T) { // add for h2 templateDB2 := templateDB templateDB2.TemplateHash = hash2 - p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) assert.ErrorIs(t, p.AddTestDatabase(ctx, templateDB2), pool.ErrPoolFull) @@ -95,7 +95,7 @@ func TestPoolAddGetConcurrent(t *testing.T) { MaxPoolSize: 6, NumOfWorkers: 4, TestDBNamePrefix: "", - EnableDBReset: true, + EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) @@ -103,8 +103,8 @@ func TestPoolAddGetConcurrent(t *testing.T) { sleepDuration := 100 * time.Millisecond // initialize hash pool - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) - p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) // add DB in one goroutine wg.Add(1) @@ -166,11 +166,11 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { MaxPoolSize: 6, NumOfWorkers: 4, TestDBNamePrefix: "", - EnableDBReset: true, + EnableDBRecreate: true, } p := 
pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) - p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) var wg sync.WaitGroup @@ -226,11 +226,11 @@ func TestPoolRemoveAll(t *testing.T) { MaxPoolSize: 6, NumOfWorkers: 4, TestDBNamePrefix: "", - EnableDBReset: true, + EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) - p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) // add DBs sequentially for i := 0; i < cfg.MaxPoolSize; i++ { @@ -248,7 +248,7 @@ func TestPoolRemoveAll(t *testing.T) { assert.Error(t, err, pool.ErrTimeout) // start using pool again - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) testDB, err := p.GetTestDatabase(ctx, hash1, 0) assert.NoError(t, err) @@ -275,10 +275,10 @@ func TestPoolInit(t *testing.T) { MaxPoolSize: 100, NumOfWorkers: 150, TestDBNamePrefix: "", - EnableDBReset: true, + EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) // we will test 2 ways of adding new DBs for i := 0; i < cfg.MaxPoolSize/2; i++ { @@ -347,10 +347,10 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { MaxPoolSize: 40, NumOfWorkers: 1, TestDBNamePrefix: "test_", - EnableDBReset: false, + EnableDBRecreate: false, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, false /*enableDBReset*/) + 
p.InitHashPool(ctx, templateDB1, initFunc, false /*enableDBRecreate*/) for i := 0; i < cfg.MaxPoolSize; i++ { // add and get freshly added DB @@ -409,12 +409,12 @@ func TestPoolReturnTestDatabase(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 40, - NumOfWorkers: 3, - EnableDBReset: true, + MaxPoolSize: 40, + NumOfWorkers: 3, + EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) for i := 0; i < cfg.MaxPoolSize; i++ { testDB, err := p.ExtendPool(ctx, templateDB1) @@ -432,7 +432,7 @@ func TestPoolReturnTestDatabase(t *testing.T) { p.Stop() } -func TestPoolResetTestDatabase(t *testing.T) { +func TestPoolRecreateTestDatabase(t *testing.T) { t.Parallel() ctx := context.Background() @@ -455,18 +455,18 @@ func TestPoolResetTestDatabase(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 40, - NumOfWorkers: 3, - EnableDBReset: true, + MaxPoolSize: 40, + NumOfWorkers: 3, + EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBReset*/) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) for i := 0; i < cfg.MaxPoolSize; i++ { testDB, err := p.ExtendPool(ctx, templateDB1) assert.NoError(t, err) - // reset - add for cleaning - assert.NoError(t, p.ResetTestDatabase(ctx, hash1, testDB.ID)) + // recreate - add for cleaning + assert.NoError(t, p.RecreateTestDatabase(ctx, hash1, testDB.ID)) } time.Sleep(100 * time.Millisecond) // wait a tiny bit to have all DB cleaned up diff --git a/pkg/templates/template.go b/pkg/templates/template.go index 3dbd328..2fe4d6f 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -28,7 +28,7 @@ type Template struct { type TemplateConfig struct { db.DatabaseConfig - ResetEnabled bool + RecreateEnabled bool } func NewTemplate(hash string, config TemplateConfig) *Template { @@ -42,11 +42,11 
@@ func NewTemplate(hash string, config TemplateConfig) *Template { return t } -func (t *Template) IsResetEnabled(ctx context.Context) bool { +func (t *Template) IsRecreateEnabled(ctx context.Context) bool { t.mutex.RLock() defer t.mutex.RUnlock() - return t.ResetEnabled + return t.RecreateEnabled } func (t *Template) GetConfig(ctx context.Context) TemplateConfig { @@ -131,6 +131,6 @@ func (l lockedTemplate) SetState(ctx context.Context, newState TemplateState) { } func (c TemplateConfig) Equals(other TemplateConfig) bool { - return c.ResetEnabled == other.ResetEnabled && + return c.RecreateEnabled == other.RecreateEnabled && c.DatabaseConfig.Database == other.DatabaseConfig.Database } diff --git a/pkg/templates/template_collection_test.go b/pkg/templates/template_collection_test.go index 99a8b94..4d4f955 100644 --- a/pkg/templates/template_collection_test.go +++ b/pkg/templates/template_collection_test.go @@ -70,7 +70,7 @@ func TestTemplateCollectionPushWithOtherConfig(t *testing.T) { Username: "ich", Database: "template_test", }, - ResetEnabled: true, + RecreateEnabled: true, } hash := "123" @@ -82,7 +82,7 @@ func TestTemplateCollectionPushWithOtherConfig(t *testing.T) { assert.False(t, added) unlock() - cfg.ResetEnabled = false + cfg.RecreateEnabled = false cfg.Database = "template_another" added, unlock = coll.Push(ctx, hash, cfg) assert.True(t, added) @@ -91,7 +91,7 @@ func TestTemplateCollectionPushWithOtherConfig(t *testing.T) { // try to get again when the template is locked template, found := coll.Get(ctx, hash) assert.True(t, found) - assert.False(t, template.ResetEnabled) + assert.False(t, template.RecreateEnabled) assert.Equal(t, "template_another", template.Config.Database) } From 9f33c803dab5ea52b4839b6915382d13ffdb3d53 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 13:03:40 +0000 Subject: [PATCH 122/160] add missing defer statement --- pkg/pool/pool_collection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index f68535d..8a90f05 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -184,7 +184,7 @@ func (p *PoolCollection) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) // MakeDBName makes a test DB name with the configured prefix, template hash and ID of the DB. func (p *PoolCollection) MakeDBName(hash string, id int) string { p.mutex.RLock() - p.mutex.RUnlock() + defer p.mutex.RUnlock() return makeDBName(p.PoolConfig.TestDBNamePrefix, hash, id) } From c052a247a2bb56bb8d27bb2d9df05dc520483393 Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 14 Jul 2023 13:07:37 +0000 Subject: [PATCH 123/160] rename enableReset to enableRecreate in api --- internal/api/templates/templates.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 85984e2..4ef7675 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -14,8 +14,8 @@ import ( func postInitializeTemplate(s *api.Server) echo.HandlerFunc { type requestPayload struct { - Hash string `json:"hash"` - EnableDBReset bool `json:"enableReset"` + Hash string `json:"hash"` + EnableDBRecreate bool `json:"enableRecreate"` } return func(c echo.Context) error { @@ -32,7 +32,7 @@ func postInitializeTemplate(s *api.Server) echo.HandlerFunc { ctx, cancel := context.WithTimeout(c.Request().Context(), 30*time.Second) defer cancel() - template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash, payload.EnableDBReset) + template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash, payload.EnableDBRecreate) if err != nil { switch err { case manager.ErrManagerNotReady: From 9a06c944b252374375425a56f8675ced01aa2561 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 19 Jul 2023 07:41:36 +0000 Subject: [PATCH 124/160] write to dirty only if /recreate not enabled --- pkg/pool/pool.go | 22 
+++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 009f870..5239434 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -149,11 +149,8 @@ func (pool *HashPool) ExtendPool(ctx context.Context, templateDB db.Database) (d return db.TestDatabase{}, err } - select { - case pool.dirty <- testDB.ID: - // sent to dirty without blocking - default: - // channel is full + if !pool.EnableDBRecreate { + pool.dirty <- testDB.ID } return testDB, nil @@ -368,12 +365,10 @@ func (pool *HashPool) recreateDirtyDB(ctx context.Context, shouldKeepDirty bool) if err := pool.recreateDB(ctx, &testDB); err != nil { // this database remains 'dirty' - select { - case pool.dirty <- index: - // sent to dirty without blocking - default: - // channel is full + if !pool.EnableDBRecreate { + pool.dirty <- testDB.ID } + continue } @@ -392,11 +387,8 @@ func (pool *HashPool) recreateDirtyDB(ctx context.Context, shouldKeepDirty bool) testDB.state = dbStateDirty pool.dbs[index] = testDB - select { - case pool.dirty <- index: - // sent to dirty without blocking - default: - // channel is full + if !pool.EnableDBRecreate { + pool.dirty <- testDB.ID } return testDB.TestDatabase, nil From 5a06623bc709626c8f46a9be9133ad5630debac8 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Wed, 19 Jul 2023 09:59:27 +0200 Subject: [PATCH 125/160] checkDatabaseConnected, integrate legacy handling with background workers, saner config defaults, legacy handling should strive to keep INTEGRESQL_TEST_INITIAL_POOL_SIZE available all time --- docker-compose.yml | 7 +- internal/api/templates/templates.go | 8 +- pkg/manager/manager.go | 91 ++++++++---- pkg/manager/manager_config.go | 21 +-- pkg/manager/manager_test.go | 102 ++++++++++++++ pkg/pool/pool.go | 205 ++++++++++++++-------------- pkg/pool/pool_test.go | 16 ++- pkg/templates/template_test.go | 94 +++++++------ 8 files changed, 341 insertions(+), 203 deletions(-) diff --git 
a/docker-compose.yml b/docker-compose.yml index 75c3afd..0aa1b13 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,15 +5,16 @@ services: build: context: . target: development - ports: - - "5000:5000" + # ports: + # - "5000:5000" working_dir: /app volumes: - .:/app #:delegated # - ./.pkg:/go/pkg # enable this to reuse the pkg cache depends_on: - postgres - environment: &SERVICE_ENV + environment: + &SERVICE_ENV PGDATABASE: &PSQL_DBNAME "sample" PGUSER: &PSQL_USER "dbuser" PGPASSWORD: &PSQL_PASS "testpass" diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 1349242..80f4902 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -141,8 +141,8 @@ func postRestoreTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusNotFound, "template not found") case manager.ErrTestNotFound: return echo.NewHTTPError(http.StatusNotFound, "test database not found") - case manager.ErrTestDBInUse: - return echo.NewHTTPError(http.StatusLocked, manager.ErrTestDBInUse.Error()) + case pool.ErrTestDBInUse: + return echo.NewHTTPError(http.StatusLocked, pool.ErrTestDBInUse.Error()) default: return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } @@ -171,8 +171,8 @@ func postUnlockTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusNotFound, "template not found") case manager.ErrTestNotFound: return echo.NewHTTPError(http.StatusNotFound, "test database not found") - case manager.ErrTestDBInUse: - return echo.NewHTTPError(http.StatusLocked, manager.ErrTestDBInUse.Error()) + case pool.ErrTestDBInUse: + return echo.NewHTTPError(http.StatusLocked, pool.ErrTestDBInUse.Error()) default: return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 8e577d8..96fc04f 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -23,7 +23,6 @@ var ( 
ErrTestNotFound = errors.New("test database not found") ErrTemplateDiscarded = errors.New("template is discarded, can't be used") ErrInvalidTemplateState = errors.New("unexpected template state") - ErrTestDBInUse = errors.New("test database is in use, close the connection before dropping") ) type Manager struct { @@ -48,34 +47,42 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.TestDatabasePrefix) } + finalConfig := config + + if len(finalConfig.TestDatabaseOwner) == 0 { + finalConfig.TestDatabaseOwner = finalConfig.ManagerDatabaseConfig.Username + } + + if len(finalConfig.TestDatabaseOwnerPassword) == 0 { + finalConfig.TestDatabaseOwnerPassword = finalConfig.ManagerDatabaseConfig.Password + } + + // Legacy handling does not support TestDatabaseInitialPoolSize=0 + if !finalConfig.TestDatabaseForceReturn && finalConfig.TestDatabaseInitialPoolSize == 0 { + finalConfig.TestDatabaseInitialPoolSize = 1 + } + + if finalConfig.TestDatabaseInitialPoolSize > finalConfig.TestDatabaseMaxPoolSize && finalConfig.TestDatabaseMaxPoolSize > 0 { + finalConfig.TestDatabaseInitialPoolSize = finalConfig.TestDatabaseMaxPoolSize + } + m := &Manager{ - config: config, + config: finalConfig, db: nil, wg: sync.WaitGroup{}, templates: templates.NewCollection(), pool: pool.NewDBPool( pool.PoolConfig{ - MaxPoolSize: config.TestDatabaseMaxPoolSize, + MaxPoolSize: finalConfig.TestDatabaseMaxPoolSize, + InitialPoolSize: finalConfig.TestDatabaseInitialPoolSize, TestDBNamePrefix: testDBPrefix, - NumOfWorkers: config.NumOfCleaningWorkers, - ForceDBReturn: config.TestDatabaseForceReturn, + NumOfWorkers: finalConfig.NumOfCleaningWorkers, + ForceDBReturn: finalConfig.TestDatabaseForceReturn, }, ), connectionCtx: context.TODO(), } - if len(m.config.TestDatabaseOwner) == 0 { - m.config.TestDatabaseOwner = m.config.ManagerDatabaseConfig.Username - } - - if len(m.config.TestDatabaseOwnerPassword) == 0 { - 
m.config.TestDatabaseOwnerPassword = m.config.ManagerDatabaseConfig.Password - } - - if m.config.TestDatabaseInitialPoolSize > m.config.TestDatabaseMaxPoolSize && m.config.TestDatabaseMaxPoolSize > 0 { - m.config.TestDatabaseInitialPoolSize = m.config.TestDatabaseMaxPoolSize - } - return m, m.config } @@ -342,17 +349,6 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return db.TestDatabase{}, err } - if !m.config.TestDatabaseForceReturn { - // before returning create a new test database in background - m.wg.Add(1) - go func(ctx context.Context, templ *templates.Template) { - defer m.wg.Done() - if err := m.createTestDatabaseFromTemplate(ctx, templ); err != nil { - fmt.Printf("integresql: failed to create a new DB in background: %v\n", err) - } - }(m.connectionCtx, template) - } - return testDB, nil } @@ -481,6 +477,9 @@ func (m *Manager) dropDatabaseWithID(ctx context.Context, hash string, id int) e func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, error) { var exists bool + + // fmt.Printf("SELECT 1 AS exists FROM pg_database WHERE datname = %s\n", dbName) + if err := m.db.QueryRowContext(ctx, "SELECT 1 AS exists FROM pg_database WHERE datname = $1", dbName).Scan(&exists); err != nil { if err == sql.ErrNoRows { return false, nil @@ -492,10 +491,31 @@ func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, return exists, nil } +func (m *Manager) checkDatabaseConnected(ctx context.Context, dbName string) (bool, error) { + + var countConnected int + + if err := m.db.QueryRowContext(ctx, "SELECT count(pid) FROM pg_stat_activity WHERE datname = $1", dbName).Scan(&countConnected); err != nil { + if err == sql.ErrNoRows { + return false, nil + } + + return false, err + } + + if countConnected > 0 { + return true, nil + } + + return false, nil +} + func (m *Manager) createDatabase(ctx context.Context, dbName string, owner string, template string) error { defer trace.StartRegion(ctx, 
"create_db").End() + // fmt.Printf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s\n", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template)) + if _, err := m.db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template))); err != nil { return err } @@ -504,6 +524,17 @@ func (m *Manager) createDatabase(ctx context.Context, dbName string, owner strin } func (m *Manager) recreateTestDB(ctx context.Context, testDB db.TestDatabase, templateName string) error { + + connected, err := m.checkDatabaseConnected(ctx, testDB.Database.Config.Database) + + if err != nil { + return err + } + + if connected { + return pool.ErrTestDBInUse + } + return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) } @@ -511,9 +542,11 @@ func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { defer trace.StartRegion(ctx, "drop_db").End() + // fmt.Printf("DROP DATABASE IF EXISTS %s\n", pq.QuoteIdentifier(dbName)) + if _, err := m.db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil { if strings.Contains(err.Error(), "is being accessed by other users") { - return ErrTestDBInUse + return pool.ErrTestDBInUse } return err diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 8fac160..1cf54d2 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -1,6 +1,7 @@ package manager import ( + "runtime" "time" "github.com/allaboutapps/integresql/pkg/db" @@ -16,7 +17,7 @@ type ManagerConfig struct { TestDatabasePrefix string TestDatabaseOwner string TestDatabaseOwnerPassword string - TestDatabaseInitialPoolSize int // Initial number of read DBs prepared in background + TestDatabaseInitialPoolSize int // Initial number of ready DBs prepared in background TestDatabaseMaxPoolSize int // Maximal pool size 
that won't be exceeded TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool @@ -54,13 +55,15 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabasePrefix: util.GetEnv("INTEGRESQL_TEST_DB_PREFIX", "test"), // reuse the same user (PGUSER) and passwort (PGPASSWORT) for the test / template databases by default - TestDatabaseOwner: util.GetEnv("INTEGRESQL_TEST_PGUSER", util.GetEnv("INTEGRESQL_PGUSER", util.GetEnv("PGUSER", "postgres"))), - TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), - TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), - TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), - TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 20000)), - TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), - NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", 3), - TestDatabaseForceReturn: util.GetEnvAsBool("INTEGRESQL_TEST_DB_FORCE_RETURN", false), + TestDatabaseOwner: util.GetEnv("INTEGRESQL_TEST_PGUSER", util.GetEnv("INTEGRESQL_PGUSER", util.GetEnv("PGUSER", "postgres"))), + TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), + // TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), + TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU()), + // TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), + TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4), + 
TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 60000)), // TODO eventually even bigger defaults? + TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), // only used when INTEGRESQL_TEST_DB_FORCE_RETURN=true + NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", runtime.NumCPU()), + TestDatabaseForceReturn: util.GetEnvAsBool("INTEGRESQL_TEST_DB_FORCE_RETURN", false), } } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 21b7b6a..8a6054b 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -305,6 +305,40 @@ func TestManagerGetTestDatabase(t *testing.T) { verifyTestDB(t, test) } +func TestManagerGetTestDatabaseExtendPoolOnDemandLegacy(t *testing.T) { + ctx := context.Background() + + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond + // no db created initally in the background + cfg.TestDatabaseInitialPoolSize = 0 // LEGACY HANDLING: this will be autotransformed to 1 during init + m, _ := testManagerWithConfig(cfg) + + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } + + defer disconnectManager(t, m) + + hash := "hashinghash" + + template, err := m.InitializeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) + } + + populateTemplateDB(t, template) + + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + + // get should succeed because a test DB is created on demand + testDB, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + assert.Equal(t, 0, testDB.ID) +} + func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { ctx := context.Background() @@ -312,6 +346,7 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t 
*testing.T) { cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond // no db created initally in the background cfg.TestDatabaseInitialPoolSize = 0 + cfg.TestDatabaseForceReturn = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -339,10 +374,77 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { assert.Equal(t, 0, testDB.ID) } +func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrentlyLegacy(t *testing.T) { + ctx := context.Background() + + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TestDatabaseForceReturn = false + cfg.TemplateFinalizeTimeout = 1 * time.Second + m, _ := testManagerWithConfig(cfg) + + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } + + defer disconnectManager(t, m) + + hash := "hashinghash" + + template, err := m.InitializeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) + } + + testCh := make(chan error, 1) + go func() { + _, err := m.GetTestDatabase(ctx, hash) + testCh <- err + }() + + populateTemplateDB(t, template) + + finalizeCh := make(chan error, 1) + go func() { + time.Sleep(500 * time.Millisecond) + + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + finalizeCh <- err + } + + finalizeCh <- nil + }() + + testDone := false + finalizeDone := false + for { + select { + case err := <-testCh: + if err != nil { + t.Fatalf("failed to get test database: %v", err) + } + + testDone = true + case err := <-finalizeCh: + if err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + + finalizeDone = true + } + + if testDone && finalizeDone { + break + } else if testDone && !finalizeDone { + t.Fatal("getting test database completed before finalizing template database") + } + } +} + func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() + 
cfg.TestDatabaseForceReturn = true cfg.TemplateFinalizeTimeout = 1 * time.Second m, _ := testManagerWithConfig(cfg) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 9cff474..ac867b8 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -17,6 +17,8 @@ var ( ErrInvalidState = errors.New("database state is not valid for this operation") ErrInvalidIndex = errors.New("invalid database index (id)") ErrTimeout = errors.New("timeout when waiting for ready db") + ErrTestDBInUse = errors.New("test database is in use, close the connection before dropping") + ErrUnsupported = errors.New("this operation is not supported with the current pooling strategy") ) type dbState int // Indicates a current DB state. @@ -31,6 +33,7 @@ const stopWorkerMessage int = -1 type PoolConfig struct { MaxPoolSize int + InitialPoolSize int TestDBNamePrefix string NumOfWorkers int // Number of cleaning workers (each hash pool has enables this number of workers) ForceDBReturn bool // Force returning test DB. If set to false, test databases that are 'dirty' can be recycled (in not actually used). @@ -138,23 +141,42 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. return } + // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), p.PoolConfig.InitialPoolSize, p.PoolConfig.MaxPoolSize) + + forceReturn := p.ForceDBReturn + p.mutex.RUnlock() // DBPool unlocked // ! var index int - select { - case <-time.After(timeout): - err = ErrTimeout - return - case index = <-pool.ready: + if forceReturn { + select { + case <-time.After(timeout): + err = ErrTimeout + return + case index = <-pool.ready: + } + } else { + // wait indefinately! 
+ // fmt.Printf("pool#%s: waiting for ready ID...\n", hash) + select { + case <-ctx.Done(): + err = ErrTimeout + return + case index = <-pool.ready: + } + + // fmt.Printf("pool#%s: got ready ID=%v\n", hash, index) } // ! // dbHashPool locked + reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() + reg.End() // sanity check, should never happen @@ -166,6 +188,9 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. testDB := pool.dbs[index] // sanity check, should never happen - we got this index from 'ready' channel if testDB.state != dbStateReady { + + // fmt.Printf("pool#%s: GetTestDatabase ErrInvalidState ID=%v\n", hash, index) + err = ErrInvalidState return } @@ -180,6 +205,32 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. // channel is full } + // LEGACY HANDLING: we try to ensure that InitialPoolSize count is staying ready + // thus, we try to move the oldest dirty dbs into cleaning + if !forceReturn { + // pool.Lock() + if len(pool.dbs) >= p.PoolConfig.MaxPoolSize { + go pool.pushNotReturnedDirtyToCleaning() + } + // pool.Unlock() + } + + // LEGACY HANDLING: Always try to extend in the BG until we reach the max pool limit... + if !forceReturn && len(pool.dbs) < p.PoolConfig.MaxPoolSize { + + go func(pool *dbHashPool, testDBNamePrefix string) { + // fmt.Printf("pool#%s: bg extend...\n", hash) + newTestDB, err := pool.extend(context.Background(), dbStateReady, testDBNamePrefix) + if err != nil { + // fmt.Printf("pool#%s: extend failed with error: %v\n", hash, err) + return + } + + // fmt.Printf("pool#%s: extended ID=%v\n", hash, newTestDB.ID) + pool.ready <- newTestDB.ID + }(pool, p.PoolConfig.TestDBNamePrefix) + } + return testDB.TestDatabase, nil // dbHashPool unlocked // ! @@ -187,8 +238,7 @@ func (p *DBPool) GetTestDatabase(ctx context.Context, hash string, timeout time. 
// AddTestDatabase adds a new test DB to the pool and creates it according to the template. // The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. -// If the pool size has already reached MAX, ErrPoolFull is returned, unless ForceDBReturn flag is set to false. -// Then databases that were given away would get reset (if no DB connection is currently open) and marked as 'Ready'. +// If the pool size has already reached MAX, ErrPoolFull is returned. func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, initFunc RecreateDBFunc) error { hash := templateDB.TemplateHash @@ -203,19 +253,14 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in pool = p.initHashPool(ctx, templateDB, initFunc) } - forceReturn := p.ForceDBReturn + // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (AddTestDatabase)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), p.PoolConfig.InitialPoolSize, p.PoolConfig.MaxPoolSize) + p.mutex.Unlock() // DBPool unlocked // ! newTestDB, err := pool.extend(ctx, dbStateReady, p.PoolConfig.TestDBNamePrefix) if err != nil { - if errors.Is(err, ErrPoolFull) && !forceReturn { - // we can try to reset test databases that are 'dirty' - _, err := pool.resetNotReturned(ctx, p.TestDBNamePrefix, false /* shouldKeepDirty */) - return err - } - return err } @@ -228,6 +273,11 @@ func (p *DBPool) AddTestDatabase(ctx context.Context, templateDB db.Database, in // AddTestDatabase adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. // The new test DB is marked as 'IsUse' and won't be picked up with GetTestDatabase, until it's returned to the pool. func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { + + if !p.ForceDBReturn { + return db.TestDatabase{}, ErrUnsupported + } + hash := templateDB.TemplateHash // ! 
@@ -243,7 +293,8 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes return db.TestDatabase{}, ErrUnknownHash } - forceReturn := p.ForceDBReturn + // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (ExtendPool)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), p.PoolConfig.InitialPoolSize, p.PoolConfig.MaxPoolSize) + p.mutex.Unlock() // DBPool unlocked // ! @@ -251,11 +302,6 @@ func (p *DBPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.Tes // because we return it right away, we treat it as 'dirty' testDB, err := pool.extend(ctx, dbStateDirty, p.PoolConfig.TestDBNamePrefix) if err != nil { - if errors.Is(err, ErrPoolFull) && !forceReturn { - // we can try to reset test databases that are 'dirty' - return pool.resetNotReturned(ctx, p.TestDBNamePrefix, true /* shouldKeepDirty */) - } - return db.TestDatabase{}, err } @@ -287,6 +333,8 @@ func (p *DBPool) RestoreTestDatabase(ctx context.Context, hash string, id int) e return ErrUnknownHash } + // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (RestoreTestDatabase)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), p.PoolConfig.InitialPoolSize, p.PoolConfig.MaxPoolSize) + // ! 
// dbHashPool locked reg = trace.StartRegion(ctx, "wait_for_lock_hash_pool") @@ -338,6 +386,9 @@ func (p *DBPool) ReturnTestDatabase(ctx context.Context, hash string, id int) er p.mutex.Unlock() return ErrUnknownHash } + + // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (ReturnTestDatabase)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), p.PoolConfig.InitialPoolSize, p.PoolConfig.MaxPoolSize) + p.mutex.Unlock() pool.Lock() @@ -382,6 +433,8 @@ func (p *DBPool) RemoveAllWithHash(ctx context.Context, hash string, removeFunc return ErrUnknownHash } + // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (RemoveAllWithHash)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), p.PoolConfig.InitialPoolSize, p.PoolConfig.MaxPoolSize) + if err := pool.removeAll(removeFunc); err != nil { return err } @@ -446,6 +499,8 @@ func (pool *dbHashPool) workerCleanUpReturnedDB() { break } + // fmt.Printf("workerCleanUpReturnedDB %d\n", id) + ctx, task := trace.NewTask(context.Background(), "worker_cleanup_task") regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") @@ -469,9 +524,21 @@ func (pool *dbHashPool) workerCleanUpReturnedDB() { reg := trace.StartRegion(ctx, "worker_cleanup") if err := pool.recreateDB(ctx, &testDB); err != nil { // TODO anna: error handling - fmt.Printf("integresql: failed to clean up DB: %v\n", err) - + // fmt.Printf("workerCleanUpReturnedDB: failed to clean up DB ID='%v': %v\n", id, err) task.End() + + // LEGACY HANDLING: we guarantee FIFO, we must keeping trying to clean up **exactly this** test database! 
+ if !pool.forceDBReturn && errors.Is(err, ErrTestDBInUse) { + + // fmt.Printf("workerCleanUpReturnedDB: scheduling retry cleanup for ID='%v'...\n", id) + + go func(id int) { + time.Sleep(250 * time.Millisecond) + // fmt.Printf("integworkerCleanUpReturnedDBresql: push DB ID='%v' into retry.", id) + pool.waitingForCleaning <- id + }(id) + } + continue } @@ -532,90 +599,22 @@ func (pool *dbHashPool) extend(ctx context.Context, state dbState, testDBPrefix // ! } -// resetNotReturned recreates one DB that is 'dirty' and to which no db clients are connected (so it can be dropped). -// If shouldKeepDirty is set to true, the DB state remains 'dirty'. Otherwise, it is marked as 'Ready' -// and can be obtained again with GetTestDatabase request - in such case error is nil but returned db.TestDatabase is empty. -func (pool *dbHashPool) resetNotReturned(ctx context.Context, testDBPrefix string, shouldKeepDirty bool) (db.TestDatabase, error) { - var testDB existingDB - var index int - found := false - - // we want to search in loop for a dirty DB that could be reused - tryTimes := 5 - for i := 0; i < tryTimes; i++ { +// Select a longest issued DB from the dirty channel and push it to the waitingForCleaning channel. +// Wait until there is a dirty DB... +func (pool *dbHashPool) pushNotReturnedDirtyToCleaning() { - timeout := 100 * time.Millisecond // arbitrary small timeout not to cause deadlock - - select { - case <-time.After(timeout): - return db.TestDatabase{}, ErrPoolFull - case index = <-pool.dirty: - } - - // ! 
- // dbHashPool locked - reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") + select { + case id := <-pool.dirty: + // fmt.Printf("pushNotReturnedDirtyToCleaning %d\n", id) pool.Lock() - reg.End() - - // sanity check, should never happen - if index < 0 || index >= len(pool.dbs) { - // if something is wrong with the received index, just return, don't try any other time (maybe RemoveAll was requested) - return db.TestDatabase{}, ErrInvalidIndex - } - - testDB = pool.dbs[index] - pool.Unlock() - - if testDB.state == dbStateReady { - // this DB is 'ready' already, we can skip it and search for a waitingForCleaning one - continue - } - - if err := pool.recreateDB(ctx, &testDB); err != nil { - // this database remains 'dirty' - select { - case pool.dirty <- index: - // sent to dirty without blocking - default: - // channel is full - } - continue - } - - found = true - break - } - - if !found { - return db.TestDatabase{}, ErrPoolFull - } - - pool.Lock() - defer pool.Unlock() - - if shouldKeepDirty { - testDB.state = dbStateDirty - pool.dbs[index] = testDB - - select { - case pool.dirty <- index: - // sent to dirty without blocking - default: - // channel is full - } - - return testDB.TestDatabase, nil + defer pool.Unlock() + testDB := pool.dbs[id] + testDB.state = dbStateWaitingForCleaning + pool.dbs[id] = testDB + pool.waitingForCleaning <- id + default: + // noop } - - // if shouldKeepDirty is false, we can add this DB to the ready pool - testDB.state = dbStateReady - pool.dbs[index] = testDB - pool.ready <- index - - return db.TestDatabase{}, nil - // dbHashPool unlocked - // ! 
} func (pool *dbHashPool) removeAll(removeFunc func(db.TestDatabase) error) error { diff --git a/pkg/pool/pool_test.go b/pkg/pool/pool_test.go index a7f2dd6..145b1c6 100644 --- a/pkg/pool/pool_test.go +++ b/pkg/pool/pool_test.go @@ -345,13 +345,14 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { for i := 0; i < cfg.MaxPoolSize; i++ { // add and get freshly added DB + // LEGACY HANDLING not supported! _, err := p.ExtendPool(ctx, templateDB1) - assert.NoError(t, err) + assert.Error(t, err) } forceExtend := func(seenIDMap *sync.Map) { newTestDB1, err := p.ExtendPool(ctx, templateDB1) - assert.NoError(t, err) + assert.Error(t, err) seenIDMap.Store(newTestDB1.ID, true) } @@ -368,11 +369,12 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { wg.Wait() - for id := 0; id < cfg.MaxPoolSize; id++ { - _, ok := seenIDMap.Load(id) - // every index that %5 != 0 should show up at least once - assert.True(t, ok, id) - } + // NOPE, not supported! + // for id := 0; id < cfg.MaxPoolSize; id++ { + // _, ok := seenIDMap.Load(id) + // // every index that %5 != 0 should show up at least once + // assert.True(t, ok, id) + // } p.Stop() } diff --git a/pkg/templates/template_test.go b/pkg/templates/template_test.go index 93a3158..5e065a6 100644 --- a/pkg/templates/template_test.go +++ b/pkg/templates/template_test.go @@ -2,10 +2,7 @@ package templates_test import ( "context" - "errors" - "sync" "testing" - "time" "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/templates" @@ -28,55 +25,56 @@ func TestTemplateGetSetState(t *testing.T) { assert.Equal(t, templates.TemplateStateDiscarded, state) } -func TestTemplateWaitForReady(t *testing.T) { - ctx := context.Background() - goroutineNum := 10 +// TODO mranftl: reenable. 
+// func TestTemplateWaitForReady(t *testing.T) { +// ctx := context.Background() +// goroutineNum := 10 - // initalize a new template, not ready yet - t1 := templates.NewTemplate(db.Database{TemplateHash: "123"}) - state := t1.GetState(ctx) - assert.Equal(t, templates.TemplateStateInit, state) +// // initalize a new template, not ready yet +// t1 := templates.NewTemplate(db.Database{TemplateHash: "123"}) +// state := t1.GetState(ctx) +// assert.Equal(t, templates.TemplateStateInit, state) - var wg sync.WaitGroup - errsChan := make(chan error, 2*goroutineNum) +// var wg sync.WaitGroup +// errsChan := make(chan error, 2*goroutineNum) - // these goroutines should get ready state after waiting long enough - for i := 0; i < goroutineNum; i++ { - wg.Add(1) - go func() { - defer wg.Done() - timeout := 1 * time.Second - state := t1.WaitUntilFinalized(ctx, timeout) - if state != templates.TemplateStateFinalized { - errsChan <- errors.New("expected ready, but is not") - } - }() - } +// // these goroutines should get ready state after waiting long enough +// for i := 0; i < goroutineNum; i++ { +// wg.Add(1) +// go func() { +// defer wg.Done() +// timeout := 1 * time.Second +// state := t1.WaitUntilFinalized(ctx, timeout) +// if state != templates.TemplateStateFinalized { +// errsChan <- errors.New(fmt.Sprintf("expected state %v (finalized), but is %v", templates.TemplateStateFinalized, state)) +// } +// }() +// } - // these goroutines should run into timeout - for i := 0; i < goroutineNum; i++ { - wg.Add(1) - go func() { - defer wg.Done() - timeout := 3 * time.Millisecond - state := t1.WaitUntilFinalized(ctx, timeout) - if state != templates.TemplateStateInit { - errsChan <- errors.New("expected state init, but is not") - } - }() - } +// // these goroutines should run into timeout +// for i := 0; i < goroutineNum; i++ { +// wg.Add(1) +// go func() { +// defer wg.Done() +// timeout := 3 * time.Millisecond +// state := t1.WaitUntilFinalized(ctx, timeout) +// if state != 
templates.TemplateStateInit { +// errsChan <- errors.New(fmt.Sprintf("expected state %v (init), but is %v", templates.TemplateStateInit, state)) +// } +// }() +// } - // now set state - time.Sleep(5 * time.Millisecond) - t1.SetState(ctx, templates.TemplateStateFinalized) +// // now set state +// time.Sleep(5 * time.Millisecond) +// t1.SetState(ctx, templates.TemplateStateFinalized) - wg.Wait() - close(errsChan) +// wg.Wait() +// close(errsChan) - if len(errsChan) > 0 { - for err := range errsChan { - t.Error(err) - } - t.Fail() - } -} +// if len(errsChan) > 0 { +// for err := range errsChan { +// t.Error(err) +// } +// t.Fail() +// } +// } From bcd977865efb6c9ef8ddc012dd2d567f60de435a Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Wed, 19 Jul 2023 12:08:26 +0200 Subject: [PATCH 126/160] disable print debugging --- pkg/manager/manager.go | 17 +++-------------- pkg/pool/pool.go | 18 +++++++++--------- 2 files changed, 12 insertions(+), 23 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 040e3f2..6a191dd 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -363,17 +363,6 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData return db.TestDatabase{}, err } - // if !template.IsRecreateEnabled(ctx) { - // // before returning create a new test database in background - // m.wg.Add(1) - // go func(ctx context.Context, templ *templates.Template) { - // defer m.wg.Done() - // if err := m.createTestDatabaseFromTemplate(ctx, templ); err != nil { - // fmt.Printf("integresql: failed to create a new DB in background: %v\n", err) - // } - // }(m.connectionCtx, template) - // } - return testDB, nil } @@ -501,7 +490,7 @@ func (m *Manager) dropDatabaseWithID(ctx context.Context, hash string, id int) e func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, error) { var exists bool - fmt.Printf("SELECT 1 AS exists FROM pg_database WHERE datname = %s\n", dbName) + // 
fmt.Printf("SELECT 1 AS exists FROM pg_database WHERE datname = %s\n", dbName) if err := m.db.QueryRowContext(ctx, "SELECT 1 AS exists FROM pg_database WHERE datname = $1", dbName).Scan(&exists); err != nil { if err == sql.ErrNoRows { @@ -537,7 +526,7 @@ func (m *Manager) createDatabase(ctx context.Context, dbName string, owner strin defer trace.StartRegion(ctx, "create_db").End() - fmt.Printf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s\n", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template)) + // fmt.Printf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s\n", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template)) if _, err := m.db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template))); err != nil { return err @@ -569,7 +558,7 @@ func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { defer trace.StartRegion(ctx, "drop_db").End() - fmt.Printf("DROP DATABASE IF EXISTS %s\n", pq.QuoteIdentifier(dbName)) + // fmt.Printf("DROP DATABASE IF EXISTS %s\n", pq.QuoteIdentifier(dbName)) if _, err := m.db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil { if strings.Contains(err.Error(), "is being accessed by other users") { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index a09f142..f87b661 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -88,7 +88,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout } } else { // wait indefinately! 
- fmt.Printf("pool#%s: waiting for ready ID...\n", hash) + // fmt.Printf("pool#%s: waiting for ready ID...\n", hash) select { case <-ctx.Done(): err = ErrTimeout @@ -96,7 +96,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout case index = <-pool.ready: } - fmt.Printf("pool#%s: got ready ID=%v\n", hash, index) + // fmt.Printf("pool#%s: got ready ID=%v\n", hash, index) } reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") @@ -122,7 +122,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout // sanity check, should never happen - we got this index from 'ready' channel if testDB.state != dbStateReady { - fmt.Printf("pool#%s: GetTestDatabase ErrInvalidState ID=%v\n", hash, index) + // fmt.Printf("pool#%s: GetTestDatabase ErrInvalidState ID=%v\n", hash, index) err = ErrInvalidState return @@ -142,19 +142,19 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout if !pool.EnableDBRecreate && len(pool.dbs) < pool.PoolConfig.MaxPoolSize { go func(pool *HashPool, testDBNamePrefix string) { - fmt.Printf("pool#%s: bg extend...\n", hash) + // fmt.Printf("pool#%s: bg extend...\n", hash) newTestDB, err := pool.extend(context.Background(), dbStateReady) if err != nil { - fmt.Printf("pool#%s: extend failed with error: %v\n", hash, err) + // fmt.Printf("pool#%s: extend failed with error: %v\n", hash, err) return } - fmt.Printf("pool#%s: extended ID=%v\n", hash, newTestDB.ID) + // fmt.Printf("pool#%s: extended ID=%v\n", hash, newTestDB.ID) pool.ready <- newTestDB.ID }(pool, pool.TestDBNamePrefix) } - fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", hash, len(pool.ready), 
len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) return testDB.TestDatabase, nil @@ -284,7 +284,7 @@ func (pool *HashPool) workerCleanUpTask() { break } - fmt.Printf("workerCleanUpReturnedDB %d\n", id) + // fmt.Printf("workerCleanUpReturnedDB %d\n", id) ctx, task := trace.NewTask(context.Background(), "worker_cleanup_task") @@ -390,7 +390,7 @@ func (pool *HashPool) pushNotReturnedDirtyToCleaning() { select { case id := <-pool.dirty: - fmt.Printf("pushNotReturnedDirtyToCleaning %d\n", id) + // fmt.Printf("pushNotReturnedDirtyToCleaning %d\n", id) pool.Lock() defer pool.Unlock() testDB := pool.dbs[id] From 041bd286188f7be9bec56c4964d4632c79fd143f Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 21 Jul 2023 17:45:36 +0000 Subject: [PATCH 127/160] apply review comments --- pkg/manager/manager.go | 26 +++++++++--------- pkg/manager/manager_test.go | 54 +++++++++++++------------------------ pkg/pool/pool.go | 10 +++---- 3 files changed, 34 insertions(+), 56 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 6a191dd..5d6a6b7 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -47,36 +47,34 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.TestDatabasePrefix) } - finalConfig := config - - if len(finalConfig.TestDatabaseOwner) == 0 { - finalConfig.TestDatabaseOwner = finalConfig.ManagerDatabaseConfig.Username + if len(config.TestDatabaseOwner) == 0 { + config.TestDatabaseOwner = config.ManagerDatabaseConfig.Username } - if len(finalConfig.TestDatabaseOwnerPassword) == 0 { - finalConfig.TestDatabaseOwnerPassword = finalConfig.ManagerDatabaseConfig.Password + if len(config.TestDatabaseOwnerPassword) == 0 { + config.TestDatabaseOwnerPassword = config.ManagerDatabaseConfig.Password } // Legacy handling does not support TestDatabaseInitialPoolSize=0 - if 
!finalConfig.TestDatabaseEnableRecreate && finalConfig.TestDatabaseInitialPoolSize == 0 { - finalConfig.TestDatabaseInitialPoolSize = 1 + if !config.TestDatabaseEnableRecreate && config.TestDatabaseInitialPoolSize == 0 { + config.TestDatabaseInitialPoolSize = 1 } - if finalConfig.TestDatabaseInitialPoolSize > finalConfig.TestDatabaseMaxPoolSize && finalConfig.TestDatabaseMaxPoolSize > 0 { - finalConfig.TestDatabaseInitialPoolSize = finalConfig.TestDatabaseMaxPoolSize + if config.TestDatabaseInitialPoolSize > config.TestDatabaseMaxPoolSize && config.TestDatabaseMaxPoolSize > 0 { + config.TestDatabaseInitialPoolSize = config.TestDatabaseMaxPoolSize } m := &Manager{ - config: finalConfig, + config: config, db: nil, wg: sync.WaitGroup{}, templates: templates.NewCollection(), pool: pool.NewPoolCollection( pool.PoolConfig{ - MaxPoolSize: finalConfig.TestDatabaseMaxPoolSize, - InitialPoolSize: finalConfig.TestDatabaseInitialPoolSize, + MaxPoolSize: config.TestDatabaseMaxPoolSize, + InitialPoolSize: config.TestDatabaseInitialPoolSize, TestDBNamePrefix: testDBPrefix, - NumOfWorkers: finalConfig.NumOfCleaningWorkers, + NumOfWorkers: config.NumOfCleaningWorkers, EnableDBRecreate: config.TestDatabaseEnableRecreate, }, ), diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index df2bd58..59797bd 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -15,6 +15,7 @@ import ( "github.com/lib/pq" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" ) func TestManagerConnect(t *testing.T) { @@ -395,49 +396,30 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrentlyLegacy(t *testing. 
t.Fatalf("failed to initialize template database: %v", err) } - testCh := make(chan error, 1) - go func() { + testCh := make(chan string, 2) + + g := errgroup.Group{} + g.Go(func() error { _, err := m.GetTestDatabase(ctx, hash) - testCh <- err - }() + testCh <- "GET" + assert.NoError(t, err) + return nil + }) populateTemplateDB(t, template) - finalizeCh := make(chan error, 1) - go func() { + g.Go(func() error { time.Sleep(500 * time.Millisecond) - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - finalizeCh <- err - } - - finalizeCh <- nil - }() - - testDone := false - finalizeDone := false - for { - select { - case err := <-testCh: - if err != nil { - t.Fatalf("failed to get test database: %v", err) - } - - testDone = true - case err := <-finalizeCh: - if err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } - - finalizeDone = true - } + _, err := m.FinalizeTemplateDatabase(ctx, hash) + testCh <- "FINALIZE" + assert.NoError(t, err) + return nil + }) - if testDone && finalizeDone { - break - } else if testDone && !finalizeDone { - t.Fatal("getting test database completed before finalizing template database") - } - } + g.Wait() + first := <-testCh + assert.Equal(t, "FINALIZE", first) } func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index fdf6fe0..4d5d9cc 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -101,6 +101,8 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() + defer pool.Unlock() + reg.End() // LEGACY HANDLING: we try to ensure that InitialPoolSize count is staying ready // thus, we try to move the oldest dirty dbs into cleaning @@ -108,10 +110,6 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout go pool.pushNotReturnedDirtyToCleaning() } - defer pool.Unlock() - - reg.End() - // sanity check, should 
never happen if index < 0 || index >= len(pool.dbs) { err = ErrInvalidIndex @@ -141,7 +139,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout // LEGACY HANDLING: Always try to extend in the BG until we reach the max pool limit... if !pool.EnableDBRecreate && len(pool.dbs) < pool.PoolConfig.MaxPoolSize { - go func(pool *HashPool, testDBNamePrefix string) { + go func(pool *HashPool) { // fmt.Printf("pool#%s: bg extend...\n", hash) newTestDB, err := pool.extend(context.Background(), dbStateReady) if err != nil { @@ -151,7 +149,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout // fmt.Printf("pool#%s: extended ID=%v\n", hash, newTestDB.ID) pool.ready <- newTestDB.ID - }(pool, pool.TestDBNamePrefix) + }(pool) } // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) From 35844ec7f48544a375fd6e8f7baec739510eddee Mon Sep 17 00:00:00 2001 From: anjankow Date: Fri, 21 Jul 2023 20:10:48 +0000 Subject: [PATCH 128/160] extend and clean in pool background, remove waitingForCleaning channel --- pkg/manager/manager.go | 46 +--- pkg/pool/pool.go | 426 ++++++++++++++++--------------- pkg/pool/pool_collection.go | 25 +- pkg/pool/pool_collection_test.go | 229 ++++------------- 4 files changed, 277 insertions(+), 449 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 5d6a6b7..cd2a5e3 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -307,7 +307,6 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.RecreateEnabled) lockedTemplate.SetState(ctx, templates.TemplateStateFinalized) - m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) 
return db.TemplateDatabase{Database: template.Database}, nil } @@ -337,24 +336,13 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData ctx, task = trace.NewTask(ctx, "get_with_timeout") testDB, err := m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseGetTimeout) task.End() - - if errors.Is(err, pool.ErrTimeout) { - // on timeout we can try to extend the pool - ctx, task := trace.NewTask(ctx, "extend_pool_on_demand") - testDB, err = m.pool.ExtendPool(ctx, template.Database) - task.End() - - } else if errors.Is(err, pool.ErrUnknownHash) { + if errors.Is(err, pool.ErrUnknownHash) { // Template exists, but the pool is not there - // it must have been removed. // It needs to be reinitialized. m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.IsRecreateEnabled(ctx)) - // pool initalized, create one test db - testDB, err = m.pool.ExtendPool(ctx, template.Database) - // // and add new test DBs in the background - // m.addInitialTestDatabasesInBackground(template, m.config.TestDatabaseInitialPoolSize) - + testDB, err = m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseGetTimeout) } if err != nil { @@ -581,36 +569,6 @@ func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owne return m.createDatabase(ctx, dbName, owner, template) } -// createTestDatabaseFromTemplate adds a new test database in the pool (increasing its size) basing on the given template. -// It waits until the template is finalized. 
-func (m *Manager) createTestDatabaseFromTemplate(ctx context.Context, template *templates.Template) error { - if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != templates.TemplateStateFinalized { - // if the state changed in the meantime, return - return ErrInvalidTemplateState - } - - return m.pool.AddTestDatabase(ctx, template.Database) -} - -// Adds new test databases for a template, intended to be run asynchronously from other operations in a separate goroutine, using the manager's WaitGroup to synchronize for shutdown. -func (m *Manager) addInitialTestDatabasesInBackground(template *templates.Template, count int) { - - ctx := m.connectionCtx - - m.wg.Add(1) - go func() { - defer m.wg.Done() - - for i := 0; i < count; i++ { - if err := m.createTestDatabaseFromTemplate(ctx, template); err != nil { - // TODO anna: error handling - fmt.Printf("integresql: failed to initialize DB from template: %v\n", err) - } - } - }() - -} - func (m *Manager) makeTemplateDatabaseName(hash string) string { return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 4d5d9cc..3de8377 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -17,18 +17,16 @@ var ( ErrInvalidIndex = errors.New("invalid database index (id)") ErrTimeout = errors.New("timeout when waiting for ready db") ErrTestDBInUse = errors.New("test database is in use, close the connection before dropping") - ErrUnsupported = errors.New("this operation is not supported with the current pooling strategy") ) type dbState int // Indicates a current DB state. const ( - dbStateReady dbState = iota // Initialized according to a template and ready to be picked up. - dbStateDirty // Currently in use. - dbStateWaitingForCleaning // Returned to the pool, waiting for the cleaning. + dbStateReady dbState = iota // Initialized according to a template and ready to be picked up. 
+ dbStateDirty // Taken by a client and potentially currently in use. ) -const stopWorkerMessage int = -1 +const minConcurrentTasksNum = 3 // controlLoop + workerTaskLoop + at least one goroutine to handle a task type existingDB struct { state dbState @@ -36,12 +34,19 @@ type existingDB struct { db.TestDatabase } +type workerTask string + +const ( + workerTaskStop = "STOP" + workerTaskExtend = "EXTEND" + workerTaskCleanDirty = "CLEAN_DIRTY" +) + // HashPool holds a test DB pool for a certain hash. Each HashPool is running cleanup workers in background. type HashPool struct { - dbs []existingDB - ready chan int // ID of initalized DBs according to a template, ready to pick them up - waitingForCleaning chan int // ID of returned DBs, need to be recreated to reuse them - dirty chan int // ID of DBs that were given away and are currenly in use + dbs []existingDB + ready chan int // ID of initalized DBs according to a template, ready to pick them up + dirty chan int // ID of DBs that were given away and need to be recreated to reuse them recreateDB recreateTestDBFunc templateDB db.Database @@ -49,67 +54,89 @@ type HashPool struct { sync.RWMutex wg sync.WaitGroup + + tasksChan chan string + running bool } // NewHashPool creates new hash pool with the given config. -// If EnableDBRecreate is true, cleanup workers start automatically. +// Starts the workers to extend the pool in background up to requested inital number. 
func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *HashPool { + if cfg.MaxConcurrentTasks < minConcurrentTasksNum { + cfg.MaxConcurrentTasks = minConcurrentTasksNum + } + pool := &HashPool{ - dbs: make([]existingDB, 0, cfg.MaxPoolSize), - ready: make(chan int, cfg.MaxPoolSize), - waitingForCleaning: make(chan int, cfg.MaxPoolSize), - dirty: make(chan int, 3*cfg.MaxPoolSize), // here indexes can be duplicated + dbs: make([]existingDB, 0, cfg.MaxPoolSize), + ready: make(chan int, cfg.MaxPoolSize), + dirty: make(chan int, cfg.MaxPoolSize), recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), templateDB: templateDB, PoolConfig: cfg, - } - pool.enableWorkers() + tasksChan: make(chan string, cfg.MaxPoolSize+1), + running: false, + } return pool } +func (pool *HashPool) Start() { + pool.Lock() + defer pool.Unlock() + + if pool.running { + return + } + + pool.running = true + for i := 0; i < pool.InitialPoolSize; i++ { + pool.tasksChan <- workerTaskExtend + } + + pool.wg.Add(1) + go func() { + defer pool.wg.Done() + pool.controlLoop() + }() +} + func (pool *HashPool) Stop() { - close(pool.waitingForCleaning) + pool.Lock() + if !pool.running { + return + } + pool.running = false + pool.Unlock() + + pool.tasksChan <- workerTaskStop pool.wg.Wait() } func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { - var index int - if pool.EnableDBRecreate { - select { - case <-time.After(timeout): - err = ErrTimeout - return - case index = <-pool.ready: - } - } else { - // wait indefinately! 
- // fmt.Printf("pool#%s: waiting for ready ID...\n", hash) - select { - case <-ctx.Done(): - err = ErrTimeout - return - case index = <-pool.ready: - } - // fmt.Printf("pool#%s: got ready ID=%v\n", hash, index) + // fmt.Printf("pool#%s: waiting for ready ID...\n", hash) + + select { + case <-time.After(timeout): + err = ErrTimeout + return + case <-ctx.Done(): + err = ctx.Err() + return + case index = <-pool.ready: } + // fmt.Printf("pool#%s: got ready ID=%v\n", hash, index) + reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() reg.End() - // LEGACY HANDLING: we try to ensure that InitialPoolSize count is staying ready - // thus, we try to move the oldest dirty dbs into cleaning - if !pool.EnableDBRecreate && len(pool.dbs) >= pool.PoolConfig.MaxPoolSize { - go pool.pushNotReturnedDirtyToCleaning() - } - // sanity check, should never happen if index < 0 || index >= len(pool.dbs) { err = ErrInvalidIndex @@ -128,104 +155,100 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout testDB.state = dbStateDirty pool.dbs[index] = testDB + pool.dirty <- index - select { - case pool.dirty <- index: - // sent to dirty without blocking - default: - // channel is full + if len(pool.dbs) < pool.PoolConfig.MaxPoolSize { + pool.tasksChan <- workerTaskExtend } - // LEGACY HANDLING: Always try to extend in the BG until we reach the max pool limit... 
- if !pool.EnableDBRecreate && len(pool.dbs) < pool.PoolConfig.MaxPoolSize { - - go func(pool *HashPool) { - // fmt.Printf("pool#%s: bg extend...\n", hash) - newTestDB, err := pool.extend(context.Background(), dbStateReady) - if err != nil { - // fmt.Printf("pool#%s: extend failed with error: %v\n", hash, err) - return - } - - // fmt.Printf("pool#%s: extended ID=%v\n", hash, newTestDB.ID) - pool.ready <- newTestDB.ID - }(pool) + // we try to ensure that InitialPoolSize count is staying ready + // thus, we try to move the oldest dirty dbs into cleaning + if len(pool.dbs) >= pool.PoolConfig.MaxPoolSize { + pool.tasksChan <- workerTaskCleanDirty } // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) return testDB.TestDatabase, nil - } func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB db.Database) error { + return pool.extend(ctx) +} - newTestDB, err := pool.extend(ctx, dbStateReady) - if err != nil { - // if errors.Is(err, ErrPoolFull) && !pool.EnableDBRecreate { - // // we can try to recreate test databases that are 'dirty' - // _, err := pool.recreateDirtyDB(ctx, false /* shouldKeepDirty */) - // return err - // } +func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, maxConcurrentTasks int) { - return err + handlers := map[string]func(ctx context.Context) error{ + workerTaskExtend: pool.extendIngoreErrPoolFull, + workerTaskCleanDirty: pool.cleanDirty, } - // and add its index to 'ready' - pool.ready <- newTestDB.ID + // to limit the number of running goroutines. 
+ var semaphore = make(chan struct{}, pool.MaxConcurrentTasks) - return nil -} - -func (pool *HashPool) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { + for task := range taskChan { + switch task { + case workerTaskStop: + return + default: + handler, ok := handlers[task] + if !ok { + fmt.Printf("invalid task: %s", task) + continue + } - if !pool.EnableDBRecreate { - return db.TestDatabase{}, ErrUnsupported - } + select { + case <-ctx.Done(): + return + case semaphore <- struct{}{}: + } - // because we return it right away, we treat it as 'dirty' - testDB, err := pool.extend(ctx, dbStateDirty) - if err != nil { - // if errors.Is(err, ErrPoolFull) && !pool.EnableDBRecreate { - // // we can try to recreate test databases that are 'dirty' - // return pool.recreateDirtyDB(ctx, true /* shouldKeepDirty */) - // } + pool.wg.Add(1) + go func(task string) { - return db.TestDatabase{}, err - } + defer func() { + pool.wg.Done() + <-semaphore + }() - if !pool.EnableDBRecreate { - pool.dirty <- testDB.ID + // fmt.Println("task", task) + if err := handler(ctx); err != nil { + // fmt.Println("task", task, "failed:", err.Error()) + } + }(task) + } } - - return testDB, nil } -func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id int) error { - reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") - pool.Lock() - defer pool.Unlock() - reg.End() +func (pool *HashPool) controlLoop() { - if id < 0 || id >= len(pool.dbs) { - return ErrInvalidIndex - } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // check if db is in the correct state - testDB := pool.dbs[id] - if testDB.state == dbStateReady { - return nil - } + workerTasksChan := make(chan string, len(pool.tasksChan)) + pool.wg.Add(1) + go func() { + defer pool.wg.Done() + pool.workerTaskLoop(ctx, workerTasksChan, pool.MaxConcurrentTasks) + }() - if testDB.state != dbStateDirty { - return ErrInvalidState - } + for task := range 
pool.tasksChan { + if task == workerTaskStop { + cancel() + workerTasksChan <- task + return + } - testDB.state = dbStateWaitingForCleaning - pool.dbs[id] = testDB + select { + case workerTasksChan <- task: + default: + // don't wait until task can be added, + // be available to receive Stop message at any time + } + } +} - // add it to waitingForCleaning channel, to have it cleaned up by the worker - pool.waitingForCleaning <- id +func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id int) error { return nil @@ -245,11 +268,7 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id in return nil } - // if not in use, it will be cleaned up by a worker - if testDB.state != dbStateDirty { - return ErrInvalidState - } - + // directly change the state to 'ready' testDB.state = dbStateReady pool.dbs[id] = testDB @@ -259,89 +278,96 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id in } -func (pool *HashPool) enableWorkers() { +// cleanDirty reads 'dirty' channel and cleans up a test DB with the received index. +// When the DB is recreated according to a template, its index goes to the 'ready' channel. +// The function waits until there is a dirty DB... +func (pool *HashPool) cleanDirty(ctx context.Context) error { + + ctx, task := trace.NewTask(ctx, "worker_clean_dirty") + defer task.End() - for i := 0; i < pool.NumOfWorkers; i++ { - pool.wg.Add(1) - go func() { - defer pool.wg.Done() - pool.workerCleanUpTask() - }() + var id int + select { + case id = <-pool.dirty: + case <-ctx.Done(): + return ctx.Err() + default: + // nothing to do + return nil } -} -// workerCleanUpTask reads 'waitingForCleaning' channel and cleans up a test DB with the received index. -// When the DB is recreated according to a template, its index goes to the 'ready' channel. 
-func (pool *HashPool) workerCleanUpTask() { + regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") + pool.RLock() + regLock.End() - for id := range pool.waitingForCleaning { - if id == stopWorkerMessage { - break - } + if id < 0 || id >= len(pool.dbs) { + // sanity check, should never happen + pool.RUnlock() + return ErrInvalidIndex + } + testDB := pool.dbs[id] + pool.RUnlock() - // fmt.Printf("workerCleanUpReturnedDB %d\n", id) + if testDB.state == dbStateReady { + // nothing to do + return nil + } - ctx, task := trace.NewTask(context.Background(), "worker_cleanup_task") + reg := trace.StartRegion(ctx, "worker_db_operation") + err := pool.recreateDB(ctx, &testDB) + reg.End() - regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") - pool.RLock() - regLock.End() + if err != nil { + fmt.Printf("worker_clean_dirty: failed to clean up DB ID='%v': %v\n", id, err) - if id < 0 || id >= len(pool.dbs) { - // sanity check, should never happen - pool.RUnlock() - task.End() - continue - } - testDB := pool.dbs[id] - pool.RUnlock() + // we guarantee FIFO, we must keeping trying to clean up **exactly this** test database! + if errors.Is(err, ErrTestDBInUse) { - if testDB.state != dbStateWaitingForCleaning { - task.End() - continue + fmt.Printf("worker_clean_dirty: scheduling retry cleanup for ID='%v'...\n", id) + time.Sleep(250 * time.Millisecond) + fmt.Printf("integworker_clean_dirtyresql: push DB ID='%v' into retry.", id) + pool.dirty <- id + pool.tasksChan <- workerTaskCleanDirty + return nil } - reg := trace.StartRegion(ctx, "worker_cleanup") - if err := pool.recreateDB(ctx, &testDB); err != nil { - // TODO anna: error handling - fmt.Printf("workerCleanUpReturnedDB: failed to clean up DB ID='%v': %v\n", id, err) - task.End() + return err + } - // LEGACY HANDLING: we guarantee FIFO, we must keeping trying to clean up **exactly this** test database! 
- if !pool.EnableDBRecreate && errors.Is(err, ErrTestDBInUse) { + regLock = trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") + pool.Lock() + defer pool.Unlock() + regLock.End() - fmt.Printf("workerCleanUpReturnedDB: scheduling retry cleanup for ID='%v'...\n", id) + if testDB.state == dbStateReady { + // oups, it has been cleaned by another worker already + // we won't add it to the 'ready' channel to avoid duplication + return nil + } - go func(id int) { - time.Sleep(250 * time.Millisecond) - fmt.Printf("integworkerCleanUpReturnedDBresql: push DB ID='%v' into retry.", id) - pool.waitingForCleaning <- id - }(id) - } + testDB.state = dbStateReady + pool.dbs[id] = testDB - continue - } + pool.ready <- testDB.ID - regLock = trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") - pool.Lock() - regLock.End() + return nil +} - testDB.state = dbStateReady - pool.dbs[id] = testDB +func (pool *HashPool) extendIngoreErrPoolFull(ctx context.Context) error { + err := pool.extend(ctx) + if errors.Is(err, ErrPoolFull) { + return nil + } - pool.Unlock() + return err +} - pool.ready <- testDB.ID +func (pool *HashPool) extend(ctx context.Context) error { - reg.End() - task.End() - } -} + ctx, task := trace.NewTask(ctx, "worker_extend") + defer task.End() -func (pool *HashPool) extend(ctx context.Context, state dbState) (db.TestDatabase, error) { - // ! 
- // HashPool locked - reg := trace.StartRegion(ctx, "extend_wait_for_lock_hash_pool") + reg := trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") pool.Lock() defer pool.Unlock() reg.End() @@ -349,12 +375,12 @@ func (pool *HashPool) extend(ctx context.Context, state dbState) (db.TestDatabas // get index of a next test DB - its ID index := len(pool.dbs) if index == cap(pool.dbs) { - return db.TestDatabase{}, ErrPoolFull + return ErrPoolFull } // initalization of a new DB using template config newTestDB := existingDB{ - state: state, + state: dbStateReady, createdAt: time.Now(), TestDatabase: db.TestDatabase{ Database: db.Database{ @@ -367,44 +393,26 @@ func (pool *HashPool) extend(ctx context.Context, state dbState) (db.TestDatabas // set DB name newTestDB.Database.Config.Database = makeDBName(pool.TestDBNamePrefix, pool.templateDB.TemplateHash, index) - if err := pool.recreateDB(ctx, &newTestDB); err != nil { - return db.TestDatabase{}, err + reg = trace.StartRegion(ctx, "worker_db_operation") + err := pool.recreateDB(ctx, &newTestDB) + reg.End() + + if err != nil { + return err } // add new test DB to the pool pool.dbs = append(pool.dbs, newTestDB) - return newTestDB.TestDatabase, nil - // HashPool unlocked - // ! -} - -// Select a longest issued DB from the dirty channel and push it to the waitingForCleaning channel. -// Wait until there is a dirty DB... 
-func (pool *HashPool) pushNotReturnedDirtyToCleaning() { + pool.ready <- newTestDB.ID - select { - case id := <-pool.dirty: - // fmt.Printf("pushNotReturnedDirtyToCleaning %d\n", id) - pool.Lock() - defer pool.Unlock() - testDB := pool.dbs[id] - testDB.state = dbStateWaitingForCleaning - pool.dbs[id] = testDB - pool.waitingForCleaning <- id - default: - // noop - } + return nil } func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { - // stop the worker - // we don't close here because if the remove operation fails, we want to be able to repeat it - for i := 0; i < pool.NumOfWorkers; i++ { - pool.waitingForCleaning <- stopWorkerMessage - } - pool.wg.Wait() + // stop all workers + pool.Stop() // ! // HashPool locked @@ -430,7 +438,7 @@ func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) er // close all only if removal of all succeeded pool.dbs = nil - close(pool.waitingForCleaning) + close(pool.dirty) return nil // HashPool unlocked diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index e85e6dd..27d8b74 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -14,11 +14,12 @@ import ( var ErrUnknownHash = errors.New("no database pool exists for this hash") type PoolConfig struct { - MaxPoolSize int - InitialPoolSize int - TestDBNamePrefix string - NumOfWorkers int // Number of cleaning workers (each hash pool runs this number of workers). - EnableDBRecreate bool // Enables recreating test databases with the cleanup workers. If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. + MaxPoolSize int + InitialPoolSize int + TestDBNamePrefix string + NumOfWorkers int // Number of cleaning workers (each hash pool runs this number of workers). + EnableDBRecreate bool // Enables recreating test databases with the cleanup workers. 
If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. + MaxConcurrentTasks int } type PoolCollection struct { @@ -65,6 +66,7 @@ func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Databas // Create a new HashPool. If recreating is enabled, workers start automatically. pool := NewHashPool(cfg, templateDB, initDBFunc) + pool.Start() // pool is ready p.pools[pool.templateDB.TemplateHash] = pool @@ -109,19 +111,6 @@ func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Data return pool.AddTestDatabase(ctx, templateDB) } -// ExtendPool adds a new test DB to the pool, creates it according to the template, and returns it right away to the caller. -// The new test DB is marked as 'IsUse' and won't be picked up with GetTestDatabase, until it's returned to the pool. -func (p *PoolCollection) ExtendPool(ctx context.Context, templateDB db.Database) (db.TestDatabase, error) { - hash := templateDB.TemplateHash - - pool, err := p.getPool(ctx, hash) - if err != nil { - return db.TestDatabase{}, err - } - - return pool.ExtendPool(ctx, templateDB) -} - // RecreateTestDatabase recreates the given test DB and returns it back to the pool. // To have it recreated, it is added to 'waitingForCleaning' channel. // If the test DB is in a different state than 'dirty', ErrInvalidState is returned. 
diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index 38c14f4..b9829c0 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -9,6 +9,7 @@ import ( "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/pool" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestPoolAddGet(t *testing.T) { @@ -37,6 +38,7 @@ func TestPoolAddGet(t *testing.T) { return nil } p.InitHashPool(ctx, templateDB, initFunc, true /*enableDBRecreate*/) + t.Cleanup(func() { p.Stop() }) // get from empty _, err := p.GetTestDatabase(ctx, hash1, 0) @@ -45,7 +47,7 @@ func TestPoolAddGet(t *testing.T) { // add a new one assert.NoError(t, p.AddTestDatabase(ctx, templateDB)) // get it - testDB, err := p.GetTestDatabase(ctx, hash1, 0) + testDB, err := p.GetTestDatabase(ctx, hash1, 100*time.Millisecond) assert.NoError(t, err) assert.Equal(t, "prefix_h1_000", testDB.Database.Config.Database) assert.Equal(t, "ich", testDB.Database.Config.Username) @@ -70,8 +72,6 @@ func TestPoolAddGet(t *testing.T) { assert.NoError(t, err) assert.Equal(t, hash2, testDB2.TemplateHash) assert.NotEqual(t, testDB1.ID, testDB2.ID) - - p.Stop() } func TestPoolAddGetConcurrent(t *testing.T) { @@ -91,38 +91,25 @@ func TestPoolAddGetConcurrent(t *testing.T) { return nil } + maxPoolSize := 15 cfg := pool.PoolConfig{ - MaxPoolSize: 6, + MaxPoolSize: maxPoolSize, + InitialPoolSize: maxPoolSize, NumOfWorkers: 4, TestDBNamePrefix: "", EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) + t.Cleanup(func() { p.Stop() }) var wg sync.WaitGroup - sleepDuration := 100 * time.Millisecond + sleepDuration := 10 * time.Millisecond // initialize hash pool + // initial test databases will be added automatically p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) - // add DB in one goroutine - wg.Add(1) - go 
func() { - defer wg.Done() - - templateDB1 := templateDB1 - templateDB2 := templateDB2 - sleepDuration := sleepDuration - - // add DBs sequentially - for i := 0; i < cfg.MaxPoolSize; i++ { - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) - time.Sleep(sleepDuration) - } - }() - // try to get them from another goroutines in parallel getDB := func(hash string) { defer wg.Done() @@ -142,7 +129,7 @@ func TestPoolAddGetConcurrent(t *testing.T) { } wg.Wait() - p.Stop() + } func TestPoolAddGetReturnConcurrent(t *testing.T) { @@ -158,28 +145,32 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { TemplateHash: hash2, } initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - t.Log("(re)create ", testDB.Database) return nil } cfg := pool.PoolConfig{ - MaxPoolSize: 6, + MaxPoolSize: 40, NumOfWorkers: 4, TestDBNamePrefix: "", EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) + t.Cleanup(func() { p.Stop() }) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) var wg sync.WaitGroup // add DBs sequentially - for i := 0; i < cfg.MaxPoolSize/2; i++ { + for i := 0; i < cfg.MaxPoolSize/4; i++ { assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) } + // stop the workers to prevent auto cleaning in background + p.Stop() + // try to get them from another goroutines in parallel getAndReturnDB := func(hash string) { defer wg.Done() @@ -191,14 +182,13 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { assert.NoError(t, p.ReturnTestDatabase(ctx, hash, db.ID)) } - for i := 0; i < cfg.MaxPoolSize*3; i++ { + for i := 0; i < cfg.MaxPoolSize; i++ { wg.Add(2) go getAndReturnDB(hash1) go getAndReturnDB(hash2) } wg.Wait() - p.Stop() } func TestPoolRemoveAll(t *testing.T) { @@ -229,6 +219,8 @@ func TestPoolRemoveAll(t *testing.T) { 
EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) + t.Cleanup(func() { p.Stop() }) + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) @@ -253,80 +245,9 @@ func TestPoolRemoveAll(t *testing.T) { testDB, err := p.GetTestDatabase(ctx, hash1, 0) assert.NoError(t, err) assert.Equal(t, 0, testDB.ID) - - p.Stop() } -func TestPoolInit(t *testing.T) { - t.Parallel() - ctx := context.Background() - - hash1 := "h1" - templateDB1 := db.Database{ - TemplateHash: hash1, - } - - initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - t.Log("(re)create ", testDB.Database) - return nil - } - - cfg := pool.PoolConfig{ - MaxPoolSize: 100, - NumOfWorkers: 150, - TestDBNamePrefix: "", - EnableDBRecreate: true, - } - p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) - - // we will test 2 ways of adding new DBs - for i := 0; i < cfg.MaxPoolSize/2; i++ { - // add and get freshly added DB - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) - _, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) - assert.NoError(t, err) - - // extend pool (= add and get) - _, err = p.ExtendPool(ctx, templateDB1) - assert.NoError(t, err) - } - - // there should be no more free DBs - _, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, 10*time.Millisecond) - assert.ErrorIs(t, err, pool.ErrTimeout) - - var wg sync.WaitGroup - // now return them all - wg.Add(1) - go func() { - defer wg.Done() - maxPoolSize := cfg.MaxPoolSize - templateHash := templateDB1.TemplateHash - for i := 0; i < maxPoolSize; i++ { - assert.NoError(t, p.ReturnTestDatabase(ctx, templateHash, i)) - } - }() - - // and check that they can be get again - // = the workers cleaned them up - wg.Add(1) - go func() { - defer wg.Done() - maxPoolSize := cfg.MaxPoolSize - templateHash := templateDB1.TemplateHash - for i := 0; i < 
maxPoolSize; i++ { - _, err := p.GetTestDatabase(ctx, templateHash, 10*time.Millisecond) - assert.NoError(t, err) - } - }() - - wg.Wait() - - p.Stop() -} - -func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { +func TestPoolReuseDirty(t *testing.T) { t.Parallel() ctx := context.Background() @@ -343,25 +264,22 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { return nil } + maxPoolSize := 40 cfg := pool.PoolConfig{ - MaxPoolSize: 40, + MaxPoolSize: maxPoolSize, + InitialPoolSize: maxPoolSize, NumOfWorkers: 1, TestDBNamePrefix: "test_", EnableDBRecreate: false, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, false /*enableDBRecreate*/) - for i := 0; i < cfg.MaxPoolSize; i++ { - // add and get freshly added DB - // LEGACY HANDLING not supported! - _, err := p.ExtendPool(ctx, templateDB1) - assert.Error(t, err) - } + p.InitHashPool(ctx, templateDB1, initFunc, false /*enableDBRecreate*/) + t.Cleanup(func() { p.Stop() }) - forceExtend := func(seenIDMap *sync.Map) { - newTestDB1, err := p.ExtendPool(ctx, templateDB1) - assert.Error(t, err) + getDirty := func(seenIDMap *sync.Map) { + newTestDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, 10*time.Millisecond) + assert.NoError(t, err) seenIDMap.Store(newTestDB1.ID, true) } @@ -372,20 +290,17 @@ func TestPoolExtendRecyclingInUseTestDB(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - forceExtend(&seenIDMap) + getDirty(&seenIDMap) }() } wg.Wait() - // NOPE, not supported! 
- // for id := 0; id < cfg.MaxPoolSize; id++ { - // _, ok := seenIDMap.Load(id) - // // every index that %5 != 0 should show up at least once - // assert.True(t, ok, id) - // } - - p.Stop() + for id := 0; id < cfg.MaxPoolSize; id++ { + _, ok := seenIDMap.Load(id) + // every index should show up at least once + assert.True(t, ok, id) + } } func TestPoolReturnTestDatabase(t *testing.T) { @@ -411,73 +326,31 @@ func TestPoolReturnTestDatabase(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 40, + MaxPoolSize: 10, NumOfWorkers: 3, EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) - - for i := 0; i < cfg.MaxPoolSize; i++ { - testDB, err := p.ExtendPool(ctx, templateDB1) - assert.NoError(t, err) - // return - don't recreate, just bring back directly to the pool - assert.NoError(t, p.ReturnTestDatabase(ctx, hash1, testDB.ID)) - } + t.Cleanup(func() { p.Stop() }) - for id := 0; id < cfg.MaxPoolSize; id++ { - recreatedTimes, ok := recreateTimesMap.Load(id) - assert.True(t, ok) - assert.Equal(t, 1, recreatedTimes) // just once to initialize it - } + p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) + // add just one test DB + require.NoError(t, p.AddTestDatabase(ctx, templateDB1)) + // stop the workers to prevent auto cleaning in background p.Stop() -} - -func TestPoolRecreateTestDatabase(t *testing.T) { - t.Parallel() - ctx := context.Background() - - hash1 := "h1" - templateDB1 := db.Database{ - TemplateHash: hash1, - Config: db.DatabaseConfig{ - Database: "h1_template", - }, - } - - recreateTimesMap := sync.Map{} - initFunc := func(ctx context.Context, testDB db.TestDatabase, templateName string) error { - times, existing := recreateTimesMap.LoadOrStore(testDB.ID, 1) - if existing { - recreateTimesMap.Store(testDB.ID, times.(int)+1) - } - - return nil - } - - cfg := pool.PoolConfig{ - MaxPoolSize: 40, - NumOfWorkers: 3, - EnableDBRecreate: true, - } - p := 
pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) - for i := 0; i < cfg.MaxPoolSize; i++ { - testDB, err := p.ExtendPool(ctx, templateDB1) - assert.NoError(t, err) - // recreate - add for cleaning - assert.NoError(t, p.RecreateTestDatabase(ctx, hash1, testDB.ID)) - } + testDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) + assert.NoError(t, err) - time.Sleep(100 * time.Millisecond) // wait a tiny bit to have all DB cleaned up + // assert that workers are stopped and no new DB showed up + _, err = p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) + assert.ErrorIs(t, err, pool.ErrTimeout) - for id := 0; id < cfg.MaxPoolSize; id++ { - recreatedTimes, ok := recreateTimesMap.Load(id) - assert.True(t, ok) - assert.Equal(t, 2, recreatedTimes) // first time to initialize it, second to clean it - } + // return and get the same one + assert.NoError(t, p.ReturnTestDatabase(ctx, hash1, testDB1.ID)) + testDB2, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) + assert.NoError(t, err) + assert.Equal(t, testDB1.ID, testDB2.ID) - p.Stop() } From ee3e8e907192e394d8c4954769eb2454c03a79cf Mon Sep 17 00:00:00 2001 From: anjankow Date: Sat, 22 Jul 2023 09:19:36 +0000 Subject: [PATCH 129/160] remove /recreate endpoint and all related config --- internal/api/templates/routes.go | 1 - internal/api/templates/templates.go | 32 +---------- pkg/manager/helpers_test.go | 7 +-- pkg/manager/manager.go | 61 +++------------------ pkg/manager/manager_config.go | 10 ++-- pkg/manager/manager_test.go | 65 ++++++++--------------- pkg/pool/pool.go | 50 ++++++++--------- pkg/pool/pool_collection.go | 28 ++-------- pkg/pool/pool_collection_test.go | 37 ++++++------- pkg/templates/template.go | 11 +--- pkg/templates/template_collection_test.go | 3 -- 11 files changed, 79 insertions(+), 226 deletions(-) diff --git a/internal/api/templates/routes.go b/internal/api/templates/routes.go index 
2ede71f..bfd7120 100644 --- a/internal/api/templates/routes.go +++ b/internal/api/templates/routes.go @@ -11,7 +11,6 @@ func InitRoutes(s *api.Server) { g.GET("/:hash/tests", getTestDatabase(s)) g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s)) // deprecated, use POST /unlock instead - g.POST("/:hash/tests/:id/recreate", postRecreateTestDatabase(s)) g.POST("/:hash/tests/:id/unlock", postUnlockTestDatabase(s)) } diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 9581cae..5bae7f1 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -14,8 +14,7 @@ import ( func postInitializeTemplate(s *api.Server) echo.HandlerFunc { type requestPayload struct { - Hash string `json:"hash"` - EnableDBRecreate bool `json:"enableRecreate"` + Hash string `json:"hash"` } return func(c echo.Context) error { @@ -32,7 +31,7 @@ func postInitializeTemplate(s *api.Server) echo.HandlerFunc { ctx, cancel := context.WithTimeout(c.Request().Context(), 30*time.Second) defer cancel() - template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash, payload.EnableDBRecreate) + template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash) if err != nil { switch err { case manager.ErrManagerNotReady: @@ -126,33 +125,6 @@ func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return postUnlockTestDatabase(s) } -func postRecreateTestDatabase(s *api.Server) echo.HandlerFunc { - return func(c echo.Context) error { - hash := c.Param("hash") - id, err := strconv.Atoi(c.Param("id")) - if err != nil { - return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID") - } - - if err := s.Manager.RecreateTestDatabase(c.Request().Context(), hash, id); err != nil { - switch err { - case manager.ErrManagerNotReady: - return echo.ErrServiceUnavailable - case manager.ErrTemplateNotFound: - return echo.NewHTTPError(http.StatusNotFound, "template not found") - case manager.ErrTestNotFound: - 
return echo.NewHTTPError(http.StatusNotFound, "test database not found") - case pool.ErrTestDBInUse: - return echo.NewHTTPError(http.StatusLocked, pool.ErrTestDBInUse.Error()) - default: - return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) - } - } - - return c.NoContent(http.StatusNoContent) - } -} - func postUnlockTestDatabase(s *api.Server) echo.HandlerFunc { return func(c echo.Context) error { hash := c.Param("hash") diff --git a/pkg/manager/helpers_test.go b/pkg/manager/helpers_test.go index 5ca8c04..27c5048 100644 --- a/pkg/manager/helpers_test.go +++ b/pkg/manager/helpers_test.go @@ -51,12 +51,9 @@ func disconnectManager(t *testing.T, m *manager.Manager) { } -func initTemplateDB(ctx context.Context, errs chan<- error, m *manager.Manager, enableDBRecreate ...bool) { +func initTemplateDB(ctx context.Context, errs chan<- error, m *manager.Manager) { - // true by default - enableDBRecreateFlag := !(len(enableDBRecreate) > 0 && !enableDBRecreate[0]) - - template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash", enableDBRecreateFlag) + template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash") if err != nil { errs <- err return diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index cd2a5e3..889c54d 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -55,8 +55,8 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { config.TestDatabaseOwnerPassword = config.ManagerDatabaseConfig.Password } - // Legacy handling does not support TestDatabaseInitialPoolSize=0 - if !config.TestDatabaseEnableRecreate && config.TestDatabaseInitialPoolSize == 0 { + // at least one test database needs to be present initially + if config.TestDatabaseInitialPoolSize == 0 { config.TestDatabaseInitialPoolSize = 1 } @@ -75,7 +75,6 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { InitialPoolSize: config.TestDatabaseInitialPoolSize, TestDBNamePrefix: testDBPrefix, NumOfWorkers: 
config.NumOfCleaningWorkers, - EnableDBRecreate: config.TestDatabaseEnableRecreate, }, ), connectionCtx: context.TODO(), @@ -183,7 +182,7 @@ func (m *Manager) Initialize(ctx context.Context) error { return nil } -func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, enableDBRecreate bool) (db.TemplateDatabase, error) { +func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "initialize_template_db") defer task.End() @@ -191,11 +190,6 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, e return db.TemplateDatabase{}, ErrManagerNotReady } - if !m.config.TestDatabaseEnableRecreate { - // only if the main config allows for DB recreate, it can be enabled - enableDBRecreate = false - } - dbName := m.makeTemplateDatabaseName(hash) templateConfig := templates.TemplateConfig{ DatabaseConfig: db.DatabaseConfig{ @@ -205,7 +199,6 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string, e Password: m.config.ManagerDatabaseConfig.Password, Database: dbName, }, - RecreateEnabled: enableDBRecreate, } added, unlock := m.templates.Push(ctx, hash, templateConfig) @@ -304,7 +297,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db } // Init a pool with this hash - m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.RecreateEnabled) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB) lockedTemplate.SetState(ctx, templates.TemplateStateFinalized) @@ -312,7 +305,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db } // GetTestDatabase tries to get a ready test DB from an existing pool. -// If no DB is ready after the preconfigured timeout, it tries to extend the pool and therefore create a new DB. +// If no DB is ready after the preconfigured timeout, ErrTimeout is returned. 
func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) { ctx, task := trace.NewTask(ctx, "get_test_db") defer task.End() @@ -340,7 +333,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData // Template exists, but the pool is not there - // it must have been removed. // It needs to be reinitialized. - m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB, template.IsRecreateEnabled(ctx)) + m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB) testDB, err = m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseGetTimeout) } @@ -389,48 +382,6 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e return nil } -// RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. -func (m *Manager) RecreateTestDatabase(ctx context.Context, hash string, id int) error { - ctx, task := trace.NewTask(ctx, "recreate_test_db") - defer task.End() - - if !m.Ready() { - return ErrManagerNotReady - } - - // check if the template exists and is finalized - template, found := m.templates.Get(ctx, hash) - if !found { - return m.dropDatabaseWithID(ctx, hash, id) - } - - // don't allow to recreate if it's not enabled for this template - if !template.IsRecreateEnabled(ctx) { - return nil - } - - if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != - templates.TemplateStateFinalized { - - return ErrInvalidTemplateState - } - - // template is ready, we can returb the testDB to the pool and have it cleaned up - if err := m.pool.RecreateTestDatabase(ctx, hash, id); err != nil { - if !(errors.Is(err, pool.ErrInvalidIndex) || - errors.Is(err, pool.ErrUnknownHash)) { - // other error is an internal error - return err - } - - // db is not tracked in the pool - // try to drop it if exists - return m.dropDatabaseWithID(ctx, hash, id) - } - - return nil -} - func (m *Manager) 
ClearTrackedTestDatabases(ctx context.Context, hash string) error { if !m.Ready() { return ErrManagerNotReady diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index ebe68d6..c7f3fbd 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -22,7 +22,6 @@ type ManagerConfig struct { TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool NumOfCleaningWorkers int // Number of pool workers cleaning up dirty DBs - TestDatabaseEnableRecreate bool // Enables recreating test databases with the cleanup workers. If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. } func DefaultManagerConfigFromEnv() ManagerConfig { @@ -60,10 +59,9 @@ func DefaultManagerConfigFromEnv() ManagerConfig { // TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU()), // TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), - TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4), - TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 60000)), // TODO eventually even bigger defaults? 
- TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), // only used when INTEGRESQL_TEST_DB_FORCE_RETURN=true - NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", runtime.NumCPU()), - TestDatabaseEnableRecreate: util.GetEnvAsBool("INTEGRESQL_TEST_DB_ENABLE_RECREATE", false) || util.GetEnvAsBool("INTEGRESQL_TEST_DB_FORCE_RETURN", false), + TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4), + TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 60000)), // TODO eventually even bigger defaults? + TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), // only used when INTEGRESQL_TEST_DB_FORCE_RETURN=true + NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", runtime.NumCPU()), } } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 59797bd..d7db13a 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -107,7 +107,7 @@ func TestManagerInitializeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -129,7 +129,7 @@ func TestManagerInitializeTemplateDatabaseTimeout(t *testing.T) { ctxt, cancel := context.WithTimeout(ctx, 10*time.Nanosecond) defer cancel() - _, err := m.InitializeTemplateDatabase(ctxt, hash, true /* enableDBRecreate */) + _, err := m.InitializeTemplateDatabase(ctxt, hash) if err != context.DeadlineExceeded { t.Fatalf("received unexpected error, got %v, want %v", err, context.DeadlineExceeded) } @@ -205,7 +205,7 @@ func TestManagerFinalizeTemplateDatabase(t *testing.T) { hash := "hashinghash" - 
template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -287,7 +287,7 @@ func TestManagerGetTestDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -323,7 +323,7 @@ func TestManagerGetTestDatabaseExtendPoolOnDemandLegacy(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -347,7 +347,6 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond // no db created initally in the background cfg.TestDatabaseInitialPoolSize = 0 - cfg.TestDatabaseEnableRecreate = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -358,7 +357,7 @@ func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -379,7 +378,6 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrentlyLegacy(t *testing. ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseEnableRecreate = false cfg.TemplateFinalizeTimeout = 1 * time.Second m, _ := testManagerWithConfig(cfg) @@ -391,7 +389,7 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrentlyLegacy(t *testing. 
hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, false) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -426,7 +424,6 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseEnableRecreate = true cfg.TemplateFinalizeTimeout = 1 * time.Second m, _ := testManagerWithConfig(cfg) @@ -438,7 +435,7 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -500,7 +497,7 @@ func TestManagerGetTestDatabaseConcurrently(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -566,7 +563,7 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -634,7 +631,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -690,7 +687,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { t.Fatalf("finalize template should not work: 
%v", err) } - _, err = m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + _, err = m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("reinitialize after discard template database should work: %v", err) } @@ -707,7 +704,6 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 3 cfg.TestDatabaseMaxPoolSize = 3 - cfg.TestDatabaseEnableRecreate = true cfg.TestDatabaseGetTimeout = 200 * time.Millisecond m, _ := testManagerWithConfig(cfg) @@ -719,7 +715,7 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -754,7 +750,6 @@ func TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { cfg.TestDatabaseMaxPoolSize = 10 cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond // force DB return - cfg.TestDatabaseEnableRecreate = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -765,7 +760,7 @@ func TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -807,8 +802,6 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 5 cfg.TestDatabaseMaxPoolSize = 5 - // enable reusing old not returned databases - cfg.TestDatabaseEnableRecreate = false m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -819,7 +812,7 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { hash := "hashinghash" - 
template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate /*enableDBRecreate */) + template, err := m.InitializeTemplateDatabase(ctx, hash /*enableDBRecreate */) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -872,14 +865,13 @@ func TestManagerGetTestDatabaseForUnknownTemplate(t *testing.T) { } } -func TestManagerReturnRecreateTestDatabase(t *testing.T) { +func TestManagerReturnTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 10 cfg.NumOfCleaningWorkers = 2 cfg.TestDatabaseMaxPoolSize = 10 - cfg.TestDatabaseEnableRecreate = true cfg.TestDatabaseGetTimeout = 200 * time.Millisecond tests := []struct { @@ -887,17 +879,6 @@ func TestManagerReturnRecreateTestDatabase(t *testing.T) { giveBackFunc func(m *manager.Manager, ctx context.Context, hash string, id int) error resultCheck func(row *sql.Row, id int) }{ - { - name: "Recreate", - giveBackFunc: func(m *manager.Manager, ctx context.Context, hash string, id int) error { - return m.RecreateTestDatabase(ctx, hash, id) - }, - resultCheck: func(row *sql.Row, id int) { - assert.NoError(t, row.Err()) - var name string - assert.ErrorIs(t, row.Scan(&name), sql.ErrNoRows, id) - }, - }, { name: "Return", giveBackFunc: func(m *manager.Manager, ctx context.Context, hash string, id int) error { @@ -926,7 +907,7 @@ func TestManagerReturnRecreateTestDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -983,7 +964,6 @@ func TestManagerRecreateTestDatabaseRecreateDisabled(t *testing.T) { cfg.TestDatabaseInitialPoolSize = 5 cfg.NumOfCleaningWorkers = 2 cfg.TestDatabaseMaxPoolSize = 10 - cfg.TestDatabaseEnableRecreate = true cfg.TestDatabaseGetTimeout = 200 * 
time.Millisecond m, _ := testManagerWithConfig(cfg) @@ -996,7 +976,7 @@ func TestManagerRecreateTestDatabaseRecreateDisabled(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, false /*enableRecreate*/) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1048,7 +1028,7 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1096,7 +1076,7 @@ func TestManagerReturnUnknownTemplateDatabase(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1126,7 +1106,7 @@ func TestManagerMultiFinalize(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, true /*enableDBRecreate */) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: %v", err) } @@ -1174,7 +1154,6 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() // there are no db added in background cfg.TestDatabaseInitialPoolSize = 0 - cfg.TestDatabaseEnableRecreate = true m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -1185,7 +1164,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { hash := "hashinghash" - template, err := m.InitializeTemplateDatabase(ctx, hash, cfg.TestDatabaseEnableRecreate) + template, err := m.InitializeTemplateDatabase(ctx, hash) if err != nil { t.Fatalf("failed to initialize template database: 
%v", err) } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 3de8377..fbb426f 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -187,36 +187,32 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string var semaphore = make(chan struct{}, pool.MaxConcurrentTasks) for task := range taskChan { - switch task { - case workerTaskStop: + handler, ok := handlers[task] + if !ok { + fmt.Printf("invalid task: %s", task) + continue + } + + select { + case <-ctx.Done(): return - default: - handler, ok := handlers[task] - if !ok { - fmt.Printf("invalid task: %s", task) - continue - } + case semaphore <- struct{}{}: + } - select { - case <-ctx.Done(): - return - case semaphore <- struct{}{}: - } + pool.wg.Add(1) + go func(task string) { - pool.wg.Add(1) - go func(task string) { + defer func() { + pool.wg.Done() + <-semaphore + }() - defer func() { - pool.wg.Done() - <-semaphore - }() + // fmt.Println("task", task) + if err := handler(ctx); err != nil { + // fmt.Println("task", task, "failed:", err.Error()) + } + }(task) - // fmt.Println("task", task) - if err := handler(ctx); err != nil { - // fmt.Println("task", task, "failed:", err.Error()) - } - }(task) - } } } @@ -234,8 +230,8 @@ func (pool *HashPool) controlLoop() { for task := range pool.tasksChan { if task == workerTaskStop { + close(workerTasksChan) cancel() - workerTasksChan <- task return } @@ -438,7 +434,7 @@ func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) er // close all only if removal of all succeeded pool.dbs = nil - close(pool.dirty) + close(pool.tasksChan) return nil // HashPool unlocked diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 27d8b74..36d8934 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -17,8 +17,7 @@ type PoolConfig struct { MaxPoolSize int InitialPoolSize int TestDBNamePrefix string - NumOfWorkers int // Number of cleaning workers (each hash pool runs this number of 
workers). - EnableDBRecreate bool // Enables recreating test databases with the cleanup workers. If this flag is on, it's no longer possible to reuse dirty (currently in use, 'locked') databases when MAX pool size is reached. + NumOfWorkers int // Number of cleaning workers (each hash pool runs this number of workers). MaxConcurrentTasks int } @@ -54,17 +53,13 @@ func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc Recreate type recreateTestDBFunc func(context.Context, *existingDB) error // InitHashPool creates a new pool with a given template hash and starts the cleanup workers. -func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc, enableDBRecreate bool) { +func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { p.mutex.Lock() defer p.mutex.Unlock() cfg := p.PoolConfig - if p.EnableDBRecreate { - // only if the main config allows for DB recreate, it can be enabled - cfg.EnableDBRecreate = enableDBRecreate - } - // Create a new HashPool. If recreating is enabled, workers start automatically. + // Create a new HashPool pool := NewHashPool(cfg, templateDB, initDBFunc) pool.Start() @@ -98,8 +93,7 @@ func (p *PoolCollection) GetTestDatabase(ctx context.Context, hash string, timeo // AddTestDatabase adds a new test DB to the pool and creates it according to the template. // The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. -// If the pool size has already reached MAX, ErrPoolFull is returned, unless EnableDBRecreate flag is set to false. -// Then databases that were given away would get recreate (if no DB connection is currently open) and marked as 'Ready'. +// If the pool size has already reached MAX, ErrPoolFull is returned. 
func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Database) error { hash := templateDB.TemplateHash @@ -111,21 +105,7 @@ func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Data return pool.AddTestDatabase(ctx, templateDB) } -// RecreateTestDatabase recreates the given test DB and returns it back to the pool. -// To have it recreated, it is added to 'waitingForCleaning' channel. -// If the test DB is in a different state than 'dirty', ErrInvalidState is returned. -func (p *PoolCollection) RecreateTestDatabase(ctx context.Context, hash string, id int) error { - - pool, err := p.getPool(ctx, hash) - if err != nil { - return err - } - - return pool.RecreateTestDatabase(ctx, hash, id) -} - // ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). -// If the test DB is in a different state than 'dirty', ErrInvalidState is returned. func (p *PoolCollection) ReturnTestDatabase(ctx context.Context, hash string, id int) error { pool, err := p.getPool(ctx, hash) if err != nil { diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index b9829c0..6afb802 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -20,7 +20,6 @@ func TestPoolAddGet(t *testing.T) { MaxPoolSize: 2, NumOfWorkers: 4, TestDBNamePrefix: "prefix_", - EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) @@ -37,7 +36,7 @@ func TestPoolAddGet(t *testing.T) { t.Log("(re)create ", testDB.Database) return nil } - p.InitHashPool(ctx, templateDB, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB, initFunc) t.Cleanup(func() { p.Stop() }) // get from empty @@ -55,7 +54,7 @@ func TestPoolAddGet(t *testing.T) { // add for h2 templateDB2 := templateDB templateDB2.TemplateHash = hash2 - p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB2, initFunc) assert.NoError(t, p.AddTestDatabase(ctx, 
templateDB2)) assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) assert.ErrorIs(t, p.AddTestDatabase(ctx, templateDB2), pool.ErrPoolFull) @@ -97,7 +96,6 @@ func TestPoolAddGetConcurrent(t *testing.T) { InitialPoolSize: maxPoolSize, NumOfWorkers: 4, TestDBNamePrefix: "", - EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) @@ -107,8 +105,8 @@ func TestPoolAddGetConcurrent(t *testing.T) { // initialize hash pool // initial test databases will be added automatically - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) - p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) // try to get them from another goroutines in parallel getDB := func(hash string) { @@ -152,13 +150,12 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { MaxPoolSize: 40, NumOfWorkers: 4, TestDBNamePrefix: "", - EnableDBRecreate: true, } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) - p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) var wg sync.WaitGroup @@ -213,16 +210,14 @@ func TestPoolRemoveAll(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 6, - NumOfWorkers: 4, - TestDBNamePrefix: "", - EnableDBRecreate: true, + MaxPoolSize: 6, + NumOfWorkers: 4, } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) - p.InitHashPool(ctx, templateDB2, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB1, initFunc) + p.InitHashPool(ctx, templateDB2, initFunc) // add DBs sequentially for i := 0; i < cfg.MaxPoolSize; i++ { @@ -240,7 +235,7 @@ func TestPoolRemoveAll(t *testing.T) { assert.Error(t, err, pool.ErrTimeout) // start using pool 
again - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB1, initFunc) assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) testDB, err := p.GetTestDatabase(ctx, hash1, 0) assert.NoError(t, err) @@ -270,11 +265,10 @@ func TestPoolReuseDirty(t *testing.T) { InitialPoolSize: maxPoolSize, NumOfWorkers: 1, TestDBNamePrefix: "test_", - EnableDBRecreate: false, } p := pool.NewPoolCollection(cfg) - p.InitHashPool(ctx, templateDB1, initFunc, false /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB1, initFunc) t.Cleanup(func() { p.Stop() }) getDirty := func(seenIDMap *sync.Map) { @@ -326,14 +320,13 @@ func TestPoolReturnTestDatabase(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 10, - NumOfWorkers: 3, - EnableDBRecreate: true, + MaxPoolSize: 10, + NumOfWorkers: 3, } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) - p.InitHashPool(ctx, templateDB1, initFunc, true /*enableDBRecreate*/) + p.InitHashPool(ctx, templateDB1, initFunc) // add just one test DB require.NoError(t, p.AddTestDatabase(ctx, templateDB1)) diff --git a/pkg/templates/template.go b/pkg/templates/template.go index 2fe4d6f..1911234 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -28,7 +28,6 @@ type Template struct { type TemplateConfig struct { db.DatabaseConfig - RecreateEnabled bool } func NewTemplate(hash string, config TemplateConfig) *Template { @@ -42,13 +41,6 @@ func NewTemplate(hash string, config TemplateConfig) *Template { return t } -func (t *Template) IsRecreateEnabled(ctx context.Context) bool { - t.mutex.RLock() - defer t.mutex.RUnlock() - - return t.RecreateEnabled -} - func (t *Template) GetConfig(ctx context.Context) TemplateConfig { t.mutex.RLock() defer t.mutex.RUnlock() @@ -131,6 +123,5 @@ func (l lockedTemplate) SetState(ctx context.Context, newState TemplateState) { } func (c TemplateConfig) Equals(other TemplateConfig) bool { - return c.RecreateEnabled == other.RecreateEnabled && 
- c.DatabaseConfig.Database == other.DatabaseConfig.Database + return c.DatabaseConfig.ConnectionString() == other.ConnectionString() } diff --git a/pkg/templates/template_collection_test.go b/pkg/templates/template_collection_test.go index 4d4f955..c69fde2 100644 --- a/pkg/templates/template_collection_test.go +++ b/pkg/templates/template_collection_test.go @@ -70,7 +70,6 @@ func TestTemplateCollectionPushWithOtherConfig(t *testing.T) { Username: "ich", Database: "template_test", }, - RecreateEnabled: true, } hash := "123" @@ -82,7 +81,6 @@ func TestTemplateCollectionPushWithOtherConfig(t *testing.T) { assert.False(t, added) unlock() - cfg.RecreateEnabled = false cfg.Database = "template_another" added, unlock = coll.Push(ctx, hash, cfg) assert.True(t, added) @@ -91,7 +89,6 @@ func TestTemplateCollectionPushWithOtherConfig(t *testing.T) { // try to get again when the template is locked template, found := coll.Get(ctx, hash) assert.True(t, found) - assert.False(t, template.RecreateEnabled) assert.Equal(t, "template_another", template.Config.Database) } From e17d08c91b36fec14e67bb9c88dca947213d6d0f Mon Sep 17 00:00:00 2001 From: anjankow Date: Sat, 22 Jul 2023 10:19:19 +0000 Subject: [PATCH 130/160] clean up manager after moving pool size mgmt to pool pkg --- pkg/manager/manager.go | 67 +++----- pkg/manager/manager_test.go | 307 +++++------------------------------- pkg/pool/pool.go | 29 ++-- 3 files changed, 68 insertions(+), 335 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 889c54d..ad9efa0 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -7,12 +7,10 @@ import ( "fmt" "runtime/trace" "strings" - "sync" "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/pool" "github.com/allaboutapps/integresql/pkg/templates" - "github.com/allaboutapps/integresql/pkg/util" "github.com/lib/pq" ) @@ -28,13 +26,9 @@ var ( type Manager struct { config ManagerConfig db *sql.DB - 
wg sync.WaitGroup templates *templates.Collection pool *pool.PoolCollection - - connectionCtx context.Context // DB connection context used for adding initial DBs in background - cancelConnectionCtx func() // Cancel function for DB connection context } func New(config ManagerConfig) (*Manager, ManagerConfig) { @@ -67,7 +61,6 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { m := &Manager{ config: config, db: nil, - wg: sync.WaitGroup{}, templates: templates.NewCollection(), pool: pool.NewPoolCollection( pool.PoolConfig{ @@ -77,7 +70,6 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { NumOfWorkers: config.NumOfCleaningWorkers, }, ), - connectionCtx: context.TODO(), } return m, m.config @@ -104,12 +96,6 @@ func (m *Manager) Connect(ctx context.Context) error { m.db = db - // set cancellable connection context - // used to stop background tasks - ctx, cancel := context.WithCancel(context.Background()) - m.connectionCtx = ctx - m.cancelConnectionCtx = cancel - return nil } @@ -118,21 +104,8 @@ func (m *Manager) Disconnect(ctx context.Context, ignoreCloseError bool) error { return errors.New("manager is not connected") } - // signal stop to background routines - m.cancelConnectionCtx() - m.connectionCtx = context.TODO() - - _, err := util.WaitWithCancellableCtx(ctx, func(context.Context) (bool, error) { - m.wg.Wait() - return true, nil - }) - - if err != nil { - // we didn't manage to stop on time background routines - // but we will continue and close the DB connection - // TODO anna: error handling - fmt.Println("integresql: timeout when stopping background tasks") - } + // stop the pool before closing DB connection + m.pool.Stop() if err := m.db.Close(); err != nil && !ignoreCloseError { return err @@ -151,7 +124,7 @@ func (m *Manager) Reconnect(ctx context.Context, ignoreDisconnectError bool) err return m.Connect(ctx) } -func (m *Manager) Ready() bool { +func (m Manager) Ready() bool { return m.db != nil } @@ -182,7 +155,7 @@ func (m 
*Manager) Initialize(ctx context.Context) error { return nil } -func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { +func (m Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "initialize_template_db") defer task.End() @@ -233,7 +206,7 @@ func (m *Manager) InitializeTemplateDatabase(ctx context.Context, hash string) ( }, nil } -func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error { +func (m Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error { ctx, task := trace.NewTask(ctx, "discard_template_db") defer task.End() @@ -242,8 +215,6 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro return ErrManagerNotReady } - m.wg.Wait() - // first remove all DB with this hash if err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB); err != nil && !errors.Is(err, pool.ErrUnknownHash) { return err @@ -270,7 +241,7 @@ func (m *Manager) DiscardTemplateDatabase(ctx context.Context, hash string) erro return m.dropDatabase(ctx, dbName) } -func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { +func (m Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "finalize_template_db") defer task.End() @@ -306,7 +277,7 @@ func (m *Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db // GetTestDatabase tries to get a ready test DB from an existing pool. // If no DB is ready after the preconfigured timeout, ErrTimeout is returned. 
-func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) { +func (m Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) { ctx, task := trace.NewTask(ctx, "get_test_db") defer task.End() @@ -346,7 +317,7 @@ func (m *Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestData } // ReturnTestDatabase returns an unchanged test DB to the pool, allowing for reuse without cleaning. -func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error { +func (m Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error { ctx, task := trace.NewTask(ctx, "return_test_db") defer task.End() @@ -382,7 +353,7 @@ func (m *Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) e return nil } -func (m *Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) error { +func (m Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) error { if !m.Ready() { return ErrManagerNotReady } @@ -395,7 +366,7 @@ func (m *Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) er return err } -func (m *Manager) ResetAllTracking(ctx context.Context) error { +func (m Manager) ResetAllTracking(ctx context.Context) error { if !m.Ready() { return ErrManagerNotReady } @@ -410,7 +381,7 @@ func (m *Manager) ResetAllTracking(ctx context.Context) error { return nil } -func (m *Manager) dropDatabaseWithID(ctx context.Context, hash string, id int) error { +func (m Manager) dropDatabaseWithID(ctx context.Context, hash string, id int) error { dbName := m.pool.MakeDBName(hash, id) exists, err := m.checkDatabaseExists(ctx, dbName) if err != nil { @@ -424,7 +395,7 @@ func (m *Manager) dropDatabaseWithID(ctx context.Context, hash string, id int) e return m.dropDatabase(ctx, dbName) } -func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, error) { +func (m Manager) checkDatabaseExists(ctx context.Context, dbName string) 
(bool, error) { var exists bool // fmt.Printf("SELECT 1 AS exists FROM pg_database WHERE datname = %s\n", dbName) @@ -440,7 +411,7 @@ func (m *Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, return exists, nil } -func (m *Manager) checkDatabaseConnected(ctx context.Context, dbName string) (bool, error) { +func (m Manager) checkDatabaseConnected(ctx context.Context, dbName string) (bool, error) { var countConnected int @@ -459,7 +430,7 @@ func (m *Manager) checkDatabaseConnected(ctx context.Context, dbName string) (bo return false, nil } -func (m *Manager) createDatabase(ctx context.Context, dbName string, owner string, template string) error { +func (m Manager) createDatabase(ctx context.Context, dbName string, owner string, template string) error { defer trace.StartRegion(ctx, "create_db").End() @@ -472,7 +443,7 @@ func (m *Manager) createDatabase(ctx context.Context, dbName string, owner strin return nil } -func (m *Manager) recreateTestPoolDB(ctx context.Context, testDB db.TestDatabase, templateName string) error { +func (m Manager) recreateTestPoolDB(ctx context.Context, testDB db.TestDatabase, templateName string) error { connected, err := m.checkDatabaseConnected(ctx, testDB.Database.Config.Database) @@ -487,11 +458,11 @@ func (m *Manager) recreateTestPoolDB(ctx context.Context, testDB db.TestDatabase return m.dropAndCreateDatabase(ctx, testDB.Database.Config.Database, m.config.TestDatabaseOwner, templateName) } -func (m *Manager) dropTestPoolDB(ctx context.Context, testDB db.TestDatabase) error { +func (m Manager) dropTestPoolDB(ctx context.Context, testDB db.TestDatabase) error { return m.dropDatabase(ctx, testDB.Config.Database) } -func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { +func (m Manager) dropDatabase(ctx context.Context, dbName string) error { defer trace.StartRegion(ctx, "drop_db").End() @@ -508,7 +479,7 @@ func (m *Manager) dropDatabase(ctx context.Context, dbName string) error { return nil } 
-func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owner string, template string) error { +func (m Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owner string, template string) error { if !m.Ready() { return ErrManagerNotReady } @@ -520,6 +491,6 @@ func (m *Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owne return m.createDatabase(ctx, dbName, owner, template) } -func (m *Manager) makeTemplateDatabaseName(hash string) string { +func (m Manager) makeTemplateDatabaseName(hash string) string { return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index d7db13a..aef86ed 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -11,7 +11,6 @@ import ( "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/manager" - "github.com/allaboutapps/integresql/pkg/pool" "github.com/lib/pq" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -306,13 +305,13 @@ func TestManagerGetTestDatabase(t *testing.T) { verifyTestDB(t, test) } -func TestManagerGetTestDatabaseExtendPoolOnDemandLegacy(t *testing.T) { +func TestManagerGetTestDatabaseExtendPool(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond - // no db created initally in the background - cfg.TestDatabaseInitialPoolSize = 0 // LEGACY HANDLING: this will be autotransformed to 1 during init + cfg.TestDatabaseGetTimeout = 300 * time.Millisecond + cfg.TestDatabaseInitialPoolSize = 0 // this will be autotransformed to 1 during init + cfg.TestDatabaseMaxPoolSize = 10 m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -334,47 +333,17 @@ func TestManagerGetTestDatabaseExtendPoolOnDemandLegacy(t *testing.T) { t.Fatalf("failed to finalize 
template database: %v", err) } - // get should succeed because a test DB is created on demand - testDB, err := m.GetTestDatabase(ctx, hash) - assert.NoError(t, err) - assert.Equal(t, 0, testDB.ID) -} - -func TestManagerGetTestDatabaseExtendPoolOnDemand(t *testing.T) { - ctx := context.Background() - - cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond - // no db created initally in the background - cfg.TestDatabaseInitialPoolSize = 0 - m, _ := testManagerWithConfig(cfg) - - if err := m.Initialize(ctx); err != nil { - t.Fatalf("initializing manager failed: %v", err) - } - - defer disconnectManager(t, m) - - hash := "hashinghash" - - template, err := m.InitializeTemplateDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to initialize template database: %v", err) - } - - populateTemplateDB(t, template) - - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize template database: %v", err) + previousID := -1 + // assert than one by one pool will be extended + for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { + testDB, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + assert.Equal(t, previousID+1, testDB.ID) + previousID = testDB.ID } - - // get should succeed because a test DB is created on demand - testDB, err := m.GetTestDatabase(ctx, hash) - assert.NoError(t, err) - assert.Equal(t, 0, testDB.ID) } -func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrentlyLegacy(t *testing.T) { +func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() @@ -420,71 +389,6 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrentlyLegacy(t *testing. 
assert.Equal(t, "FINALIZE", first) } -func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { - ctx := context.Background() - - cfg := manager.DefaultManagerConfigFromEnv() - cfg.TemplateFinalizeTimeout = 1 * time.Second - m, _ := testManagerWithConfig(cfg) - - if err := m.Initialize(ctx); err != nil { - t.Fatalf("initializing manager failed: %v", err) - } - - defer disconnectManager(t, m) - - hash := "hashinghash" - - template, err := m.InitializeTemplateDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to initialize template database: %v", err) - } - - testCh := make(chan error, 1) - go func() { - _, err := m.GetTestDatabase(ctx, hash) - testCh <- err - }() - - populateTemplateDB(t, template) - - finalizeCh := make(chan error, 1) - go func() { - time.Sleep(500 * time.Millisecond) - - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - finalizeCh <- err - } - - finalizeCh <- nil - }() - - testDone := false - finalizeDone := false - for { - select { - case err := <-testCh: - if err != nil { - t.Fatalf("failed to get test database: %v", err) - } - - testDone = true - case err := <-finalizeCh: - if err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } - - finalizeDone = true - } - - if testDone && finalizeDone { - break - } else if testDone && !finalizeDone { - t.Fatal("getting test database completed before finalizing template database") - } - } -} - func TestManagerGetTestDatabaseConcurrently(t *testing.T) { ctx := context.Background() @@ -698,7 +602,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { } -func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { +func TestManagerGetAndReturnTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() @@ -740,61 +644,6 @@ func TestManagerGetTestDatabaseReusingIDs(t *testing.T) { assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) } -func 
TestManagerGetTestDatabaseExtendingPoolForceReturn(t *testing.T) { - ctx := context.Background() - - cfg := manager.DefaultManagerConfigFromEnv() - // there is just 1 database initially - cfg.TestDatabaseInitialPoolSize = 1 - // should extend up to 10 on demand - cfg.TestDatabaseMaxPoolSize = 10 - cfg.TestDatabaseGetTimeout = 10 * time.Nanosecond - // force DB return - m, _ := testManagerWithConfig(cfg) - - if err := m.Initialize(ctx); err != nil { - t.Fatalf("initializing manager failed: %v", err) - } - - defer disconnectManager(t, m) - - hash := "hashinghash" - - template, err := m.InitializeTemplateDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to initialize template database: %v", err) - } - - populateTemplateDB(t, template) - - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } - - seenIDs := map[int]bool{} - for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { - test, err := m.GetTestDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to get test database: %v", err) - } - - if _, ok := seenIDs[test.ID]; ok { - t.Errorf("received already seen test database ID %d", test.ID) - } - - seenIDs[test.ID] = true - - // don't return - } - - // should not be able to extend beyond the limit - _, err = m.GetTestDatabase(ctx, hash) - assert.Error(t, err) - - // discard the template - assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) -} - func TestManagerGetTestDatabaseDontReturn(t *testing.T) { ctx := context.Background() @@ -802,6 +651,7 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseInitialPoolSize = 5 cfg.TestDatabaseMaxPoolSize = 5 + cfg.TestDatabaseGetTimeout = time.Second m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -826,21 +676,21 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { var wg sync.WaitGroup for i := 0; i < cfg.TestDatabaseMaxPoolSize*5; i++ 
{ wg.Add(1) - go func() { + go func(i int) { defer wg.Done() testDB, err := m.GetTestDatabase(ctx, hash) - require.NoError(t, err) + require.NoError(t, err, i) db, err := sql.Open("postgres", testDB.Config.ConnectionString()) assert.NoError(t, err) // keep an open DB connection for a while - time.Sleep(200 * time.Millisecond) + time.Sleep(20 * time.Millisecond) // now disconnect db.Close() // don't return - }() + }(i) } wg.Wait() @@ -869,100 +719,7 @@ func TestManagerReturnTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseInitialPoolSize = 10 - cfg.NumOfCleaningWorkers = 2 - cfg.TestDatabaseMaxPoolSize = 10 - cfg.TestDatabaseGetTimeout = 200 * time.Millisecond - - tests := []struct { - name string - giveBackFunc func(m *manager.Manager, ctx context.Context, hash string, id int) error - resultCheck func(row *sql.Row, id int) - }{ - { - name: "Return", - giveBackFunc: func(m *manager.Manager, ctx context.Context, hash string, id int) error { - return m.ReturnTestDatabase(ctx, hash, id) - }, - resultCheck: func(row *sql.Row, id int) { - assert.NoError(t, row.Err(), id) - var name string - assert.NoError(t, row.Scan(&name), id) - assert.Equal(t, "Snufkin", name) - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - - m, _ := testManagerWithConfig(cfg) - - if err := m.Initialize(ctx); err != nil { - t.Fatalf("initializing manager failed: %v", err) - } - - defer disconnectManager(t, m) - - hash := "hashinghash" - - template, err := m.InitializeTemplateDatabase(ctx, hash) - if err != nil { - t.Fatalf("failed to initialize template database: %v", err) - } - - populateTemplateDB(t, template) - - if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { - t.Fatalf("failed to finalize template database: %v", err) - } - - for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { - testDB, err := m.GetTestDatabase(ctx, hash) - assert.NoError(t, err) - - // open the 
connection and modify the test DB - db, err := sql.Open("postgres", testDB.Config.ConnectionString()) - require.NoError(t, err) - require.NoError(t, db.PingContext(ctx)) - - _, err = db.ExecContext(ctx, `INSERT INTO pilots (id, "name", created_at, updated_at) VALUES ('777a1a87-5ef7-4309-8814-0f1054751177', 'Snufkin', '2023-07-13 09:44:00.548', '2023-07-13 09:44:00.548')`) - assert.NoError(t, err, testDB.ID) - db.Close() - } - - _, err = m.GetTestDatabase(ctx, hash) - assert.ErrorIs(t, err, pool.ErrPoolFull) - - // recreate or return test database - for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { - assert.NoError(t, tt.giveBackFunc(m, ctx, hash, i), i) - } - - for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { - // assert that test db can be get again - testDB, err := m.GetTestDatabase(ctx, hash) - assert.NoError(t, err) - - db, err := sql.Open("postgres", testDB.Config.ConnectionString()) - require.NoError(t, err) - require.NoError(t, db.PingContext(ctx)) - - row := db.QueryRowContext(ctx, "SELECT name FROM pilots WHERE id = '777a1a87-5ef7-4309-8814-0f1054751177'") - tt.resultCheck(row, testDB.ID) - db.Close() - } - }) - } -} - -func TestManagerRecreateTestDatabaseRecreateDisabled(t *testing.T) { - ctx := context.Background() - - cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseInitialPoolSize = 5 - cfg.NumOfCleaningWorkers = 2 + cfg.TestDatabaseInitialPoolSize = 1 cfg.TestDatabaseMaxPoolSize = 10 cfg.TestDatabaseGetTimeout = 200 * time.Millisecond @@ -987,33 +744,41 @@ func TestManagerRecreateTestDatabaseRecreateDisabled(t *testing.T) { t.Fatalf("failed to finalize template database: %v", err) } - testDB, err := m.GetTestDatabase(ctx, hash) + testDB1, err := m.GetTestDatabase(ctx, hash) assert.NoError(t, err) - // open the connection and modify the test DB - db, err := sql.Open("postgres", testDB.Config.ConnectionString()) + db, err := sql.Open("postgres", testDB1.Config.ConnectionString()) require.NoError(t, err) require.NoError(t, 
db.PingContext(ctx)) - _, err = db.ExecContext(ctx, `INSERT INTO pilots (id, "name", created_at, updated_at) VALUES ('777a1a87-5ef7-4309-8814-0f1054751177', 'Snufkin', '2023-07-13 09:44:00.548', '2023-07-13 09:44:00.548')`) - assert.NoError(t, err, testDB.ID) + assert.NoError(t, err, testDB1.ID) db.Close() + // finally return it + assert.NoError(t, m.ReturnTestDatabase(ctx, hash, testDB1.ID)) - // assert.NoError(t, m.RecreateTestDatabase(ctx, hash, testDB.ID)) + // on first GET call the pool has been extended + // we will get the newly created DB + testDB2, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + assert.NotEqual(t, testDB1.ID, testDB2.ID) - time.Sleep(100 * time.Millisecond) // sleep sufficient time to recreate the db by a worker (which should not happen) + // next in 'ready' channel should be the returned DB + testDB3, err := m.GetTestDatabase(ctx, hash) + assert.NoError(t, err) + assert.Equal(t, testDB1.ID, testDB3.ID) - db, err = sql.Open("postgres", testDB.Config.ConnectionString()) + // assert that it hasn't been cleaned but just reused directly + db, err = sql.Open("postgres", testDB3.Config.ConnectionString()) require.NoError(t, err) require.NoError(t, db.PingContext(ctx)) - // assert that the data is still there, even after RecreateTestDatabase is called row := db.QueryRowContext(ctx, "SELECT name FROM pilots WHERE id = '777a1a87-5ef7-4309-8814-0f1054751177'") assert.NoError(t, row.Err()) var name string assert.NoError(t, row.Scan(&name)) assert.Equal(t, "Snufkin", name) db.Close() + } func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index fbb426f..e16101c 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -179,8 +179,8 @@ func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB db.Databas func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, maxConcurrentTasks int) { handlers := map[string]func(ctx context.Context) error{ - 
workerTaskExtend: pool.extendIngoreErrPoolFull, - workerTaskCleanDirty: pool.cleanDirty, + workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), + workerTaskCleanDirty: ignoreErrs(pool.cleanDirty, context.Canceled), } // to limit the number of running goroutines. @@ -209,7 +209,7 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string // fmt.Println("task", task) if err := handler(ctx); err != nil { - // fmt.Println("task", task, "failed:", err.Error()) + fmt.Println("task", task, "failed:", err.Error()) } }(task) @@ -244,12 +244,6 @@ func (pool *HashPool) controlLoop() { } } -func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id int) error { - - return nil - -} - func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { pool.Lock() defer pool.Unlock() @@ -314,7 +308,7 @@ func (pool *HashPool) cleanDirty(ctx context.Context) error { reg.End() if err != nil { - fmt.Printf("worker_clean_dirty: failed to clean up DB ID='%v': %v\n", id, err) + // fmt.Printf("worker_clean_dirty: failed to clean up DB ID='%v': %v\n", id, err) // we guarantee FIFO, we must keeping trying to clean up **exactly this** test database! 
if errors.Is(err, ErrTestDBInUse) { @@ -349,13 +343,16 @@ func (pool *HashPool) cleanDirty(ctx context.Context) error { return nil } -func (pool *HashPool) extendIngoreErrPoolFull(ctx context.Context) error { - err := pool.extend(ctx) - if errors.Is(err, ErrPoolFull) { - return nil +func ignoreErrs(f func(ctx context.Context) error, errs ...error) func(context.Context) error { + return func(ctx context.Context) error { + err := f(ctx) + for _, e := range errs { + if errors.Is(err, e) { + return nil + } + } + return err } - - return err } func (pool *HashPool) extend(ctx context.Context) error { From 12f7fcfe31f0b03858a4edd6187c0d4daec853ad Mon Sep 17 00:00:00 2001 From: anjankow Date: Sat, 22 Jul 2023 10:47:07 +0000 Subject: [PATCH 131/160] use PoolMaxParallelTasks --- pkg/manager/manager.go | 12 ++++++---- pkg/manager/manager_config.go | 8 +++---- pkg/pool/pool.go | 12 +++++----- pkg/pool/pool_collection.go | 9 ++++---- pkg/pool/pool_collection_test.go | 38 ++++++++++++++++---------------- 5 files changed, 41 insertions(+), 38 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index ad9efa0..f46282c 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -58,16 +58,20 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { config.TestDatabaseInitialPoolSize = config.TestDatabaseMaxPoolSize } + if config.PoolMaxParallelTasks < 1 { + config.PoolMaxParallelTasks = 1 + } + m := &Manager{ config: config, db: nil, templates: templates.NewCollection(), pool: pool.NewPoolCollection( pool.PoolConfig{ - MaxPoolSize: config.TestDatabaseMaxPoolSize, - InitialPoolSize: config.TestDatabaseInitialPoolSize, - TestDBNamePrefix: testDBPrefix, - NumOfWorkers: config.NumOfCleaningWorkers, + MaxPoolSize: config.TestDatabaseMaxPoolSize, + InitialPoolSize: config.TestDatabaseInitialPoolSize, + TestDBNamePrefix: testDBPrefix, + PoolMaxParallelTasks: config.PoolMaxParallelTasks, }, ), } diff --git a/pkg/manager/manager_config.go 
b/pkg/manager/manager_config.go index c7f3fbd..1523710 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -21,7 +21,7 @@ type ManagerConfig struct { TestDatabaseMaxPoolSize int // Maximal pool size that won't be exceeded TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool - NumOfCleaningWorkers int // Number of pool workers cleaning up dirty DBs + PoolMaxParallelTasks int // Maximal number of pool tasks running in parallel. Must be a number greater or equal 1. } func DefaultManagerConfigFromEnv() ManagerConfig { @@ -60,8 +60,8 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU()), // TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4), - TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 60000)), // TODO eventually even bigger defaults? 
- TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 500)), // only used when INTEGRESQL_TEST_DB_FORCE_RETURN=true - NumOfCleaningWorkers: util.GetEnvAsInt("INTEGRESQL_NUM_OF_CLEANING_WORKERS", runtime.NumCPU()), + TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 5*60*10e3 /*5 min*/)), + TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 1*60*10e3 /*1 min, timeout hardcoded also in GET request handler*/)), + PoolMaxParallelTasks: util.GetEnvAsInt("INTEGRESQL_POOL_MAX_PARALLEL_TASKS", runtime.NumCPU()), } } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index e16101c..f8f8271 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -26,7 +26,7 @@ const ( dbStateDirty // Taken by a client and potentially currently in use. ) -const minConcurrentTasksNum = 3 // controlLoop + workerTaskLoop + at least one goroutine to handle a task +const minConcurrentTasksNum = 1 type existingDB struct { state dbState @@ -63,8 +63,8 @@ type HashPool struct { // Starts the workers to extend the pool in background up to requested inital number. 
func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *HashPool { - if cfg.MaxConcurrentTasks < minConcurrentTasksNum { - cfg.MaxConcurrentTasks = minConcurrentTasksNum + if cfg.PoolMaxParallelTasks < minConcurrentTasksNum { + cfg.PoolMaxParallelTasks = minConcurrentTasksNum } pool := &HashPool{ @@ -176,7 +176,7 @@ func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB db.Databas return pool.extend(ctx) } -func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, maxConcurrentTasks int) { +func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, poolMaxParallelTasks int) { handlers := map[string]func(ctx context.Context) error{ workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), @@ -184,7 +184,7 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string } // to limit the number of running goroutines. - var semaphore = make(chan struct{}, pool.MaxConcurrentTasks) + var semaphore = make(chan struct{}, poolMaxParallelTasks) for task := range taskChan { handler, ok := handlers[task] @@ -225,7 +225,7 @@ func (pool *HashPool) controlLoop() { pool.wg.Add(1) go func() { defer pool.wg.Done() - pool.workerTaskLoop(ctx, workerTasksChan, pool.MaxConcurrentTasks) + pool.workerTaskLoop(ctx, workerTasksChan, pool.PoolMaxParallelTasks) }() for task := range pool.tasksChan { diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 36d8934..85e635a 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -14,11 +14,10 @@ import ( var ErrUnknownHash = errors.New("no database pool exists for this hash") type PoolConfig struct { - MaxPoolSize int - InitialPoolSize int - TestDBNamePrefix string - NumOfWorkers int // Number of cleaning workers (each hash pool runs this number of workers). 
- MaxConcurrentTasks int + MaxPoolSize int + InitialPoolSize int + TestDBNamePrefix string + PoolMaxParallelTasks int } type PoolCollection struct { diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index 6afb802..c89b42f 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -17,9 +17,9 @@ func TestPoolAddGet(t *testing.T) { ctx := context.Background() cfg := pool.PoolConfig{ - MaxPoolSize: 2, - NumOfWorkers: 4, - TestDBNamePrefix: "prefix_", + MaxPoolSize: 2, + PoolMaxParallelTasks: 4, + TestDBNamePrefix: "prefix_", } p := pool.NewPoolCollection(cfg) @@ -92,10 +92,10 @@ func TestPoolAddGetConcurrent(t *testing.T) { maxPoolSize := 15 cfg := pool.PoolConfig{ - MaxPoolSize: maxPoolSize, - InitialPoolSize: maxPoolSize, - NumOfWorkers: 4, - TestDBNamePrefix: "", + MaxPoolSize: maxPoolSize, + InitialPoolSize: maxPoolSize, + PoolMaxParallelTasks: 4, + TestDBNamePrefix: "", } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) @@ -147,9 +147,9 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 40, - NumOfWorkers: 4, - TestDBNamePrefix: "", + MaxPoolSize: 40, + PoolMaxParallelTasks: 4, + TestDBNamePrefix: "", } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) @@ -210,8 +210,8 @@ func TestPoolRemoveAll(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 6, - NumOfWorkers: 4, + MaxPoolSize: 6, + PoolMaxParallelTasks: 4, } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) @@ -261,10 +261,10 @@ func TestPoolReuseDirty(t *testing.T) { maxPoolSize := 40 cfg := pool.PoolConfig{ - MaxPoolSize: maxPoolSize, - InitialPoolSize: maxPoolSize, - NumOfWorkers: 1, - TestDBNamePrefix: "test_", + MaxPoolSize: maxPoolSize, + InitialPoolSize: maxPoolSize, + PoolMaxParallelTasks: 1, + TestDBNamePrefix: "test_", } p := pool.NewPoolCollection(cfg) @@ -272,7 +272,7 @@ func TestPoolReuseDirty(t *testing.T) { t.Cleanup(func() { p.Stop() }) 
getDirty := func(seenIDMap *sync.Map) { - newTestDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, 10*time.Millisecond) + newTestDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, 1*time.Second) assert.NoError(t, err) seenIDMap.Store(newTestDB1.ID, true) } @@ -320,8 +320,8 @@ func TestPoolReturnTestDatabase(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 10, - NumOfWorkers: 3, + MaxPoolSize: 10, + PoolMaxParallelTasks: 3, } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) From f6d48e894f0b715928951236241e59c9a3a7a9d1 Mon Sep 17 00:00:00 2001 From: Luc van Kessel Date: Thu, 27 Jul 2023 12:06:55 +0200 Subject: [PATCH 132/160] added EXPOSE to dockerfile to expose default port --- Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 0b96f57..d335b08 100644 --- a/Dockerfile +++ b/Dockerfile @@ -85,4 +85,6 @@ COPY --from=builder-integresql /app/bin/integresql / # Note that cmd is not supported with these kind of images, no shell included # see https://github.com/GoogleContainerTools/distroless/issues/62 # and https://github.com/GoogleContainerTools/distroless#entrypoints -ENTRYPOINT [ "/integresql" ] \ No newline at end of file +ENTRYPOINT [ "/integresql" ] + +EXPOSE 5000 From 5878138e17c2dbba747f1046679ff20b9c993632 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Fri, 4 Aug 2023 15:49:41 +0200 Subject: [PATCH 133/160] review only, reactivate print debugging --- pkg/manager/manager.go | 1 - pkg/pool/pool.go | 16 ++++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index f46282c..36c3ccc 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -280,7 +280,6 @@ func (m Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db. } // GetTestDatabase tries to get a ready test DB from an existing pool. 
-// If no DB is ready after the preconfigured timeout, ErrTimeout is returned. func (m Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) { ctx, task := trace.NewTask(ctx, "get_test_db") defer task.End() diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index f8f8271..ebef62c 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -118,7 +118,7 @@ func (pool *HashPool) Stop() { func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { var index int - // fmt.Printf("pool#%s: waiting for ready ID...\n", hash) + fmt.Printf("pool#%s: waiting for ready ID...\n", hash) select { case <-time.After(timeout): @@ -130,7 +130,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout case index = <-pool.ready: } - // fmt.Printf("pool#%s: got ready ID=%v\n", hash, index) + fmt.Printf("pool#%s: got ready ID=%v\n", hash, index) reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() @@ -147,7 +147,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout // sanity check, should never happen - we got this index from 'ready' channel if testDB.state != dbStateReady { - // fmt.Printf("pool#%s: GetTestDatabase ErrInvalidState ID=%v\n", hash, index) + fmt.Printf("pool#%s: GetTestDatabase ErrInvalidState ID=%v\n", hash, index) err = ErrInvalidState return @@ -167,7 +167,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout pool.tasksChan <- workerTaskCleanDirty } - // fmt.Printf("pool#%s: ready=%d, dirty=%d, waitingForCleaning=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", hash, len(pool.ready), len(pool.dirty), len(pool.waitingForCleaning), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), 
len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) return testDB.TestDatabase, nil } @@ -178,6 +178,8 @@ func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB db.Databas func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, poolMaxParallelTasks int) { + fmt.Println("workerTaskLoop") + handlers := map[string]func(ctx context.Context) error{ workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), workerTaskCleanDirty: ignoreErrs(pool.cleanDirty, context.Canceled), @@ -218,6 +220,8 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string func (pool *HashPool) controlLoop() { + fmt.Println("controlLoop") + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -340,6 +344,7 @@ func (pool *HashPool) cleanDirty(ctx context.Context) error { pool.ready <- testDB.ID + fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (cleanDirty)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) return nil } @@ -362,6 +367,9 @@ func (pool *HashPool) extend(ctx context.Context) error { reg := trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") pool.Lock() + + fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (extend)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + defer pool.Unlock() reg.End() From 752fad2665f383152f43e0ffcea9cb08c59a316f Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 9 Aug 2023 12:42:20 +0000 Subject: [PATCH 134/160] bring back simple /recreate endpoint --- internal/api/templates/routes.go | 1 + internal/api/templates/templates.go | 27 ++++++++++++++ pkg/manager/manager.go | 38 ++++++++++++++++++- 
pkg/manager/manager_config.go | 2 +- pkg/manager/manager_test.go | 58 +++++++++++++++++++++++++++++ pkg/pool/pool.go | 42 +++++++++++++++++++-- pkg/pool/pool_collection.go | 11 +++++- 7 files changed, 172 insertions(+), 7 deletions(-) diff --git a/internal/api/templates/routes.go b/internal/api/templates/routes.go index bfd7120..2ede71f 100644 --- a/internal/api/templates/routes.go +++ b/internal/api/templates/routes.go @@ -11,6 +11,7 @@ func InitRoutes(s *api.Server) { g.GET("/:hash/tests", getTestDatabase(s)) g.DELETE("/:hash/tests/:id", deleteReturnTestDatabase(s)) // deprecated, use POST /unlock instead + g.POST("/:hash/tests/:id/recreate", postRecreateTestDatabase(s)) g.POST("/:hash/tests/:id/unlock", postUnlockTestDatabase(s)) } diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index 5bae7f1..f32446a 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -154,3 +154,30 @@ func postUnlockTestDatabase(s *api.Server) echo.HandlerFunc { return c.NoContent(http.StatusNoContent) } } + +func postRecreateTestDatabase(s *api.Server) echo.HandlerFunc { + return func(c echo.Context) error { + hash := c.Param("hash") + id, err := strconv.Atoi(c.Param("id")) + if err != nil { + return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID") + } + + if err := s.Manager.RecreateTestDatabase(c.Request().Context(), hash, id); err != nil { + switch err { + case manager.ErrManagerNotReady: + return echo.ErrServiceUnavailable + case manager.ErrTemplateNotFound: + return echo.NewHTTPError(http.StatusNotFound, "template not found") + case manager.ErrTestNotFound: + return echo.NewHTTPError(http.StatusNotFound, "test database not found") + case pool.ErrTestDBInUse: + return echo.NewHTTPError(http.StatusLocked, pool.ErrTestDBInUse.Error()) + default: + return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) + } + } + + return c.NoContent(http.StatusNoContent) + } +} diff --git 
a/pkg/manager/manager.go b/pkg/manager/manager.go index f46282c..72746a0 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -320,7 +320,7 @@ func (m Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatab return testDB, nil } -// ReturnTestDatabase returns an unchanged test DB to the pool, allowing for reuse without cleaning. +// ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). func (m Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) error { ctx, task := trace.NewTask(ctx, "return_test_db") defer task.End() @@ -357,6 +357,42 @@ func (m Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) er return nil } +// RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. +func (m *Manager) RecreateTestDatabase(ctx context.Context, hash string, id int) error { + ctx, task := trace.NewTask(ctx, "recreate_test_db") + defer task.End() + + if !m.Ready() { + return ErrManagerNotReady + } + + // check if the template exists and is finalized + template, found := m.templates.Get(ctx, hash) + if !found { + return m.dropDatabaseWithID(ctx, hash, id) + } + + if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != + templates.TemplateStateFinalized { + return ErrInvalidTemplateState + } + + // template is ready, we can returb the testDB to the pool and have it cleaned up + if err := m.pool.RecreateTestDatabase(ctx, hash, id); err != nil { + if !(errors.Is(err, pool.ErrInvalidIndex) || + errors.Is(err, pool.ErrUnknownHash)) { + // other error is an internal error + return err + } + + // db is not tracked in the pool + // try to drop it if exists + return m.dropDatabaseWithID(ctx, hash, id) + } + + return nil +} + func (m Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) error { if !m.Ready() { return ErrManagerNotReady diff --git a/pkg/manager/manager_config.go 
b/pkg/manager/manager_config.go index 1523710..3269375 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -20,7 +20,7 @@ type ManagerConfig struct { TestDatabaseInitialPoolSize int // Initial number of ready DBs prepared in background TestDatabaseMaxPoolSize int // Maximal pool size that won't be exceeded TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state - TestDatabaseGetTimeout time.Duration // Time to wait for a ready database before extending the pool + TestDatabaseGetTimeout time.Duration // Time to wait for a ready database PoolMaxParallelTasks int // Maximal number of pool tasks running in parallel. Must be a number greater or equal 1. } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index aef86ed..8709cb8 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -644,6 +644,64 @@ func TestManagerGetAndReturnTestDatabase(t *testing.T) { assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) } +func TestManagerGetAndRecreateTestDatabase(t *testing.T) { + ctx := context.Background() + + cfg := manager.DefaultManagerConfigFromEnv() + cfg.TestDatabaseInitialPoolSize = 10 + cfg.TestDatabaseMaxPoolSize = 15 + cfg.TestDatabaseGetTimeout = 200 * time.Millisecond + m, _ := testManagerWithConfig(cfg) + + if err := m.Initialize(ctx); err != nil { + t.Fatalf("initializing manager failed: %v", err) + } + + defer disconnectManager(t, m) + + hash := "hashinghash" + + template, err := m.InitializeTemplateDatabase(ctx, hash) + if err != nil { + t.Fatalf("failed to initialize template database: %v", err) + } + + populateTemplateDB(t, template) + + if _, err := m.FinalizeTemplateDatabase(ctx, hash); err != nil { + t.Fatalf("failed to finalize template database: %v", err) + } + + // request many more databases than initally added + for i := 0; i <= cfg.TestDatabaseMaxPoolSize*3; i++ { + test, err := m.GetTestDatabase(ctx, hash) + 
assert.NoError(t, err) + assert.NotEmpty(t, test) + + db, err := sql.Open("postgres", test.Config.ConnectionString()) + require.NoError(t, err) + require.NoError(t, db.PingContext(ctx)) + + // assert that it's always initialized according to a template + var res int + assert.NoError(t, db.QueryRowContext(ctx, "SELECT COUNT(*) FROM pilots WHERE name = 'Anna'").Scan(&res)) + assert.Equal(t, 0, res, i) + + // make changes into test DB + _, err = db.ExecContext(ctx, `INSERT INTO pilots (id, "name", created_at, updated_at) VALUES ('844a1a87-5ef7-4309-8814-0f1054751156', 'Anna', '2023-03-23 09:44:00.548', '2023-03-23 09:44:00.548');`) + require.NoError(t, err) + assert.NoError(t, db.QueryRowContext(ctx, "SELECT COUNT(*) FROM pilots WHERE name = 'Anna'").Scan(&res)) + assert.Equal(t, 1, res) + db.Close() + + // recreate testDB after usage + assert.NoError(t, m.RecreateTestDatabase(ctx, hash, test.ID)) + } + + // discard the template + assert.NoError(t, m.DiscardTemplateDatabase(ctx, hash)) +} + func TestManagerGetTestDatabaseDontReturn(t *testing.T) { ctx := context.Background() diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index f8f8271..25a6ae9 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -29,8 +29,7 @@ const ( const minConcurrentTasksNum = 1 type existingDB struct { - state dbState - createdAt time.Time + state dbState db.TestDatabase } @@ -244,6 +243,7 @@ func (pool *HashPool) controlLoop() { } } +// ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { pool.Lock() defer pool.Unlock() @@ -268,6 +268,41 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id in } +// RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. 
+func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id int) error { + + pool.RLock() + if id < 0 || id >= len(pool.dbs) { + pool.RUnlock() + return ErrInvalidIndex + } + + // check if db is in the correct state + testDB := pool.dbs[id] + pool.RUnlock() + + if testDB.state == dbStateReady { + return nil + } + + // state is dirty -> we will now recreate it + if err := pool.recreateDB(ctx, &testDB); err != nil { + return err + } + + pool.Lock() + defer pool.Unlock() + + // change the state to 'ready' + testDB.state = dbStateReady + pool.dbs[id] = testDB + + pool.ready <- id + + return nil + +} + // cleanDirty reads 'dirty' channel and cleans up a test DB with the received index. // When the DB is recreated according to a template, its index goes to the 'ready' channel. // The function waits until there is a dirty DB... @@ -373,8 +408,7 @@ func (pool *HashPool) extend(ctx context.Context) error { // initalization of a new DB using template config newTestDB := existingDB{ - state: dbStateReady, - createdAt: time.Now(), + state: dbStateReady, TestDatabase: db.TestDatabase{ Database: db.Database{ TemplateHash: pool.templateDB.TemplateHash, diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 85e635a..6aad7e2 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -44,7 +44,6 @@ type RemoveDBFunc func(ctx context.Context, testDB db.TestDatabase) error func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc RecreateDBFunc) recreateTestDBFunc { return func(ctx context.Context, testDBWrapper *existingDB) error { - testDBWrapper.createdAt = time.Now() return userRecreateFunc(ctx, testDBWrapper.TestDatabase, templateName) } } @@ -114,6 +113,16 @@ func (p *PoolCollection) ReturnTestDatabase(ctx context.Context, hash string, id return pool.ReturnTestDatabase(ctx, hash, id) } +// RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. 
+func (p *PoolCollection) RecreateTestDatabase(ctx context.Context, hash string, id int) error { + pool, err := p.getPool(ctx, hash) + if err != nil { + return err + } + + return pool.RecreateTestDatabase(ctx, hash, id) +} + // RemoveAllWithHash removes a pool with a given template hash. // All background workers belonging to this pool are stopped. func (p *PoolCollection) RemoveAllWithHash(ctx context.Context, hash string, removeFunc RemoveDBFunc) error { From 22f984eac1954cbf0598a5d53ab55a1154a8df61 Mon Sep 17 00:00:00 2001 From: anjankow Date: Wed, 9 Aug 2023 12:42:32 +0000 Subject: [PATCH 135/160] remove unused utils --- .vscode/launch.json | 1 - .vscode/tasks.json | 8 -------- pkg/util/sort.go | 42 ------------------------------------------ pkg/util/sort_test.go | 26 -------------------------- 4 files changed, 77 deletions(-) delete mode 100644 pkg/util/sort.go delete mode 100644 pkg/util/sort_test.go diff --git a/.vscode/launch.json b/.vscode/launch.json index b80d00a..14667b8 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -12,7 +12,6 @@ "program": "${workspaceFolder}/cmd/server", "env": {}, "args": [], - "preLaunchTask": "build" } ] } \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json index c7c1385..6104e5c 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -12,13 +12,5 @@ "isDefault": true } }, - { - "label": "build", - "type": "shell", - "command": "make build", - "group": { - "kind": "build" - } - } ] } \ No newline at end of file diff --git a/pkg/util/sort.go b/pkg/util/sort.go deleted file mode 100644 index e55cf89..0000000 --- a/pkg/util/sort.go +++ /dev/null @@ -1,42 +0,0 @@ -package util - -import ( - "time" -) - -// SliceSortedByTime keeps data that should be sorted by KeyTime -type SliceSortedByTime[T any] []structSortedByTime[T] - -// NewSliceToSortByTime creates a new SliceSortedByTime -func NewSliceToSortByTime[T any]() SliceSortedByTime[T] { - return SliceSortedByTime[T]{} -} - -type 
structSortedByTime[T any] struct { - KeyTime time.Time - Data T -} - -// Add adds a new element to the end of the slice. -// Call sort.Sort() on the slice to have it ordered. -func (s *SliceSortedByTime[T]) Add(t time.Time, data T) { - *s = append(*s, structSortedByTime[T]{ - KeyTime: t, - Data: data, - }) -} - -// Len implements sort.Interface -func (s SliceSortedByTime[T]) Len() int { - return len(s) -} - -// Less implements sort.Interface -func (s SliceSortedByTime[T]) Less(i, j int) bool { - return s[i].KeyTime.Before(s[j].KeyTime) -} - -// Swap implements sort.Interface -func (s SliceSortedByTime[T]) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} diff --git a/pkg/util/sort_test.go b/pkg/util/sort_test.go deleted file mode 100644 index 1a31f75..0000000 --- a/pkg/util/sort_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package util_test - -import ( - "sort" - "testing" - "time" - - "github.com/allaboutapps/integresql/pkg/util" - "github.com/stretchr/testify/assert" -) - -func TestSliceSortedByTimeImplements(t *testing.T) { - assert.Implements(t, (*sort.Interface)(nil), new(util.SliceSortedByTime[int])) -} - -func TestSliceSortedByTimeSorted(t *testing.T) { - s := util.NewSliceToSortByTime[int]() - s.Add(time.Now().Add(time.Hour), 1) - s.Add(time.Now().Add(-time.Hour), 2) - s.Add(time.Now(), 3) - - sort.Sort(s) - assert.Equal(t, s[0].Data, 2, s[0].KeyTime) - assert.Equal(t, s[1].Data, 3, s[1].KeyTime) - assert.Equal(t, s[2].Data, 1, s[2].KeyTime) -} From c7342a640309424b4aaaee2ae42e1616765cf1f8 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Fri, 25 Aug 2023 16:45:18 +0200 Subject: [PATCH 136/160] close, range and refill dirty channel on unlock and recreate, debug statements, use 1000 instead of 10e3 --- .drone.yml | 2 +- pkg/manager/manager_config.go | 4 ++-- pkg/pool/pool.go | 43 +++++++++++++++++++++++++++++++++-- 3 files changed, 44 insertions(+), 5 deletions(-) diff --git a/.drone.yml b/.drone.yml index 20a5c9a..0a8deee 100644 --- a/.drone.yml +++ 
b/.drone.yml @@ -30,7 +30,7 @@ alias: - &IMAGE_DEPLOY_ID ${DRONE_REPO,,}:${DRONE_COMMIT_SHA} # Defines which branches will trigger a docker image push our Google Cloud Registry (tags are always published) - - &GCR_PUBLISH_BRANCHES [dev, master, aj/pooling-improvements] + - &GCR_PUBLISH_BRANCHES [dev, master, aj/pooling-improvements, mr/aj-review] # Docker registry publish default settings - &GCR_REGISTRY_SETTINGS diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 3269375..435ec11 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -60,8 +60,8 @@ func DefaultManagerConfigFromEnv() ManagerConfig { TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU()), // TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4), - TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 5*60*10e3 /*5 min*/)), - TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 1*60*10e3 /*1 min, timeout hardcoded also in GET request handler*/)), + TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 5*60*1000 /*5 min*/)), + TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 1*60*1000 /*1 min, timeout hardcoded also in GET request handler*/)), PoolMaxParallelTasks: util.GetEnvAsInt("INTEGRESQL_POOL_MAX_PARALLEL_TASKS", runtime.NumCPU()), } } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 562d5f4..7980b08 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -162,7 +162,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout // we try to ensure that InitialPoolSize count is staying ready // thus, we 
try to move the oldest dirty dbs into cleaning - if len(pool.dbs) >= pool.PoolConfig.MaxPoolSize { + if len(pool.dbs) >= pool.PoolConfig.MaxPoolSize && len(pool.ready) < pool.InitialPoolSize { pool.tasksChan <- workerTaskCleanDirty } @@ -266,10 +266,29 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id in testDB.state = dbStateReady pool.dbs[id] = testDB + // fmt.Printf("ReturnTestDatabase %v: close channel\n", id) + + // The testDB is still in the dirty channel. + // We need to explicitly remove it from there by force closing the channel so we can range over it and thus recreate a new dirty channel without the returned ID. + newDirty := make(chan int, pool.MaxPoolSize) + close(pool.dirty) + + for dirtyID := range pool.dirty { + if dirtyID != id { + newDirty <- dirtyID + } + } + + // fmt.Printf("ReturnTestDatabase %v: reset channel\n", id) + pool.dirty = newDirty + + // fmt.Printf("ReturnTestDatabase %v: ready\n", id) + // id to ready again. pool.ready <- id - return nil + fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (ReturnTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + return nil } // RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. @@ -301,8 +320,28 @@ func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id testDB.state = dbStateReady pool.dbs[id] = testDB + // fmt.Printf("RecreateTestDatabase %v: close channel\n", id) + + // The testDB is still in the dirty channel. + // We need to explicitly remove it from there by force closing the channel so we can range over it and thus recreate a new dirty channel without the returned ID. 
+ newDirty := make(chan int, pool.MaxPoolSize) + close(pool.dirty) + + for dirtyID := range pool.dirty { + if dirtyID != id { + newDirty <- dirtyID + } + } + + // fmt.Printf("RecreateTestDatabase %v: reset channel\n", id) + pool.dirty = newDirty + + // fmt.Printf("RecreateTestDatabase %v: ready\n", id) + // id to ready again. pool.ready <- id + fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (RecreateTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + return nil } From 8e64b273ae131793bb4a44dba64a4583d05d72db Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 28 Aug 2023 18:21:50 +0200 Subject: [PATCH 137/160] filter via tmp channel without closing the initial, fix tests / stabilize via disabling worker autostart --- Makefile | 5 +- docker-compose.yml | 4 +- pkg/manager/manager_test.go | 8 +-- pkg/pool/pool.go | 90 ++++++++++++++++---------------- pkg/pool/pool_collection.go | 25 ++++++--- pkg/pool/pool_collection_test.go | 31 ++++++----- pkg/templates/template_test.go | 6 +-- 7 files changed, 93 insertions(+), 76 deletions(-) diff --git a/Makefile b/Makefile index 284edf2..8ce960f 100644 --- a/Makefile +++ b/Makefile @@ -45,7 +45,10 @@ test: ##- Run tests, output by package, print coverage. # note that we explicitly don't want to use a -coverpkg=./... option, per pkg coverage take precedence go-test-by-pkg: ##- (opt) Run tests, output by package. - gotestsum --format pkgname-and-test-fails --jsonfile /tmp/test.log -- -race -cover -count=1 -coverprofile=/tmp/coverage.out ./... + gotestsum --format pkgname-and-test-fails --format-hide-empty-pkg --jsonfile /tmp/test.log -- -race -cover -count=1 -coverprofile=/tmp/coverage.out ./... + +go-test-by-name: ##- (opt) Run tests, output by testname. 
+ gotestsum --format testname --jsonfile /tmp/test.log -- -race -cover -count=1 -coverprofile=/tmp/coverage.out ./... go-test-print-coverage: ##- (opt) Print overall test coverage (must be done after running tests). @printf "coverage " diff --git a/docker-compose.yml b/docker-compose.yml index 75c3afd..9f2de21 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,8 +5,8 @@ services: build: context: . target: development - ports: - - "5000:5000" + # ports: + # - "5000:5000" working_dir: /app volumes: - .:/app #:delegated diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 8709cb8..9763cc6 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -648,9 +648,9 @@ func TestManagerGetAndRecreateTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseInitialPoolSize = 10 - cfg.TestDatabaseMaxPoolSize = 15 - cfg.TestDatabaseGetTimeout = 200 * time.Millisecond + cfg.TestDatabaseInitialPoolSize = 4 + cfg.TestDatabaseMaxPoolSize = 8 + cfg.TestDatabaseGetTimeout = 250 * time.Millisecond m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -673,7 +673,7 @@ func TestManagerGetAndRecreateTestDatabase(t *testing.T) { } // request many more databases than initally added - for i := 0; i <= cfg.TestDatabaseMaxPoolSize*3; i++ { + for i := 0; i <= cfg.TestDatabaseMaxPoolSize*2; i++ { test, err := m.GetTestDatabase(ctx, hash) assert.NoError(t, err) assert.NotEmpty(t, test) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 7980b08..99f1549 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -157,6 +157,7 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout pool.dirty <- index if len(pool.dbs) < pool.PoolConfig.MaxPoolSize { + fmt.Printf("pool#%s: Conditional extend\n", hash) pool.tasksChan <- workerTaskExtend } @@ -177,7 +178,7 @@ func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB 
db.Databas func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, poolMaxParallelTasks int) { - fmt.Println("workerTaskLoop") + fmt.Printf("pool#%s: workerTaskLoop\n", pool.templateDB.TemplateHash) handlers := map[string]func(ctx context.Context) error{ workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), @@ -208,9 +209,10 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string <-semaphore }() - // fmt.Println("task", task) + fmt.Printf("pool#%s: workerTaskLoop task=%v\n", pool.templateDB.TemplateHash, task) + if err := handler(ctx); err != nil { - fmt.Println("task", task, "failed:", err.Error()) + fmt.Printf("pool#%s: workerTaskLoop task=%v FAILED! err=%v\n", pool.templateDB.TemplateHash, task, err.Error()) } }(task) @@ -219,7 +221,7 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string func (pool *HashPool) controlLoop() { - fmt.Println("controlLoop") + fmt.Printf("pool#%s: controlLoop\n", pool.templateDB.TemplateHash) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -266,29 +268,43 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id in testDB.state = dbStateReady pool.dbs[id] = testDB - // fmt.Printf("ReturnTestDatabase %v: close channel\n", id) + // remove id from dirty and add it to ready channel + pool.excludeIDFromDirtyChannel(id) + pool.ready <- id - // The testDB is still in the dirty channel. - // We need to explicitly remove it from there by force closing the channel so we can range over it and thus recreate a new dirty channel without the returned ID. 
- newDirty := make(chan int, pool.MaxPoolSize) - close(pool.dirty) + fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (ReturnTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) - for dirtyID := range pool.dirty { - if dirtyID != id { - newDirty <- dirtyID - } - } + return nil +} - // fmt.Printf("ReturnTestDatabase %v: reset channel\n", id) - pool.dirty = newDirty +func (pool *HashPool) excludeIDFromDirtyChannel(id int) { - // fmt.Printf("ReturnTestDatabase %v: ready\n", id) - // id to ready again. - pool.ready <- id + // The testDB identified by overgiven id may still in the dirty channel. We want to exclude it. + // We need to explicitly remove it from there by filtering the current channel to a tmp channel. + // We finally close the tmp channel and flush it onto the dirty channel again. + // The db is ready again. + filteredDirty := make(chan int, pool.MaxPoolSize) - fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (ReturnTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + var dirtyID int + for loop := true; loop; { + select { + case dirtyID = <-pool.dirty: + if dirtyID != id { + filteredDirty <- dirtyID + } + default: + loop = false + break + } + } - return nil + // filteredDirty now has all filtered values without the returned id, redirect the other back to the dirty channel. + // close so we can range over it... + close(filteredDirty) + + for dirtyID := range filteredDirty { + pool.dirty <- dirtyID + } } // RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. 
@@ -320,24 +336,8 @@ func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id testDB.state = dbStateReady pool.dbs[id] = testDB - // fmt.Printf("RecreateTestDatabase %v: close channel\n", id) - - // The testDB is still in the dirty channel. - // We need to explicitly remove it from there by force closing the channel so we can range over it and thus recreate a new dirty channel without the returned ID. - newDirty := make(chan int, pool.MaxPoolSize) - close(pool.dirty) - - for dirtyID := range pool.dirty { - if dirtyID != id { - newDirty <- dirtyID - } - } - - // fmt.Printf("RecreateTestDatabase %v: reset channel\n", id) - pool.dirty = newDirty - - // fmt.Printf("RecreateTestDatabase %v: ready\n", id) - // id to ready again. + // remove id from dirty and add it to ready channel + pool.excludeIDFromDirtyChannel(id) pool.ready <- id fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (RecreateTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) @@ -361,20 +361,23 @@ func (pool *HashPool) cleanDirty(ctx context.Context) error { return ctx.Err() default: // nothing to do + fmt.Println("cleanDirty noop", id) return nil } + fmt.Printf("pool#%s: cleanDirty %v\n", pool.templateDB.TemplateHash, id) + regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") - pool.RLock() + pool.Lock() regLock.End() if id < 0 || id >= len(pool.dbs) { // sanity check, should never happen - pool.RUnlock() + pool.Unlock() return ErrInvalidIndex } testDB := pool.dbs[id] - pool.RUnlock() + pool.Unlock() if testDB.state == dbStateReady { // nothing to do @@ -441,10 +444,9 @@ func (pool *HashPool) extend(ctx context.Context) error { reg := trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") pool.Lock() + defer pool.Unlock() fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d 
(extend)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) - - defer pool.Unlock() reg.End() // get index of a next test DB - its ID diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 6aad7e2..b509e9a 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -14,10 +14,11 @@ import ( var ErrUnknownHash = errors.New("no database pool exists for this hash") type PoolConfig struct { - MaxPoolSize int - InitialPoolSize int - TestDBNamePrefix string - PoolMaxParallelTasks int + MaxPoolSize int + InitialPoolSize int + TestDBNamePrefix string + PoolMaxParallelTasks int + DisableWorkerAutostart bool } type PoolCollection struct { @@ -59,12 +60,25 @@ func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Databas // Create a new HashPool pool := NewHashPool(cfg, templateDB, initDBFunc) - pool.Start() + + if !cfg.DisableWorkerAutostart { + pool.Start() + } // pool is ready p.pools[pool.templateDB.TemplateHash] = pool } +// Start is used to start all background workers +func (p *PoolCollection) Start() { + p.mutex.RLock() + defer p.mutex.RUnlock() + + for _, pool := range p.pools { + pool.Start() + } +} + // Stop is used to stop all background workers func (p *PoolCollection) Stop() { p.mutex.RLock() @@ -73,7 +87,6 @@ func (p *PoolCollection) Stop() { for _, pool := range p.pools { pool.Stop() } - } // GetTestDatabase picks up a ready to use test DB. It waits the given timeout until a DB is available. 
diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index c89b42f..465c515 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -17,9 +17,10 @@ func TestPoolAddGet(t *testing.T) { ctx := context.Background() cfg := pool.PoolConfig{ - MaxPoolSize: 2, - PoolMaxParallelTasks: 4, - TestDBNamePrefix: "prefix_", + MaxPoolSize: 2, + PoolMaxParallelTasks: 4, + TestDBNamePrefix: "prefix_", + DisableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! } p := pool.NewPoolCollection(cfg) @@ -37,16 +38,17 @@ func TestPoolAddGet(t *testing.T) { return nil } p.InitHashPool(ctx, templateDB, initFunc) + t.Cleanup(func() { p.Stop() }) - // get from empty + // get from empty (just initialized) _, err := p.GetTestDatabase(ctx, hash1, 0) assert.Error(t, err, pool.ErrTimeout) // add a new one assert.NoError(t, p.AddTestDatabase(ctx, templateDB)) // get it - testDB, err := p.GetTestDatabase(ctx, hash1, 100*time.Millisecond) + testDB, err := p.GetTestDatabase(ctx, hash1, 1*time.Second) assert.NoError(t, err) assert.Equal(t, "prefix_h1_000", testDB.Database.Config.Database) assert.Equal(t, "ich", testDB.Database.Config.Username) @@ -60,14 +62,14 @@ func TestPoolAddGet(t *testing.T) { assert.ErrorIs(t, p.AddTestDatabase(ctx, templateDB2), pool.ErrPoolFull) // get from empty h1 - _, err = p.GetTestDatabase(ctx, hash1, 0) - assert.Error(t, err, pool.ErrTimeout) + _, err = p.GetTestDatabase(ctx, hash1, 100*time.Millisecond) + assert.ErrorIs(t, err, pool.ErrTimeout) // get from h2 - testDB1, err := p.GetTestDatabase(ctx, hash2, 0) + testDB1, err := p.GetTestDatabase(ctx, hash2, 1*time.Second) assert.NoError(t, err) assert.Equal(t, hash2, testDB1.TemplateHash) - testDB2, err := p.GetTestDatabase(ctx, hash2, 0) + testDB2, err := p.GetTestDatabase(ctx, hash2, 1*time.Second) assert.NoError(t, err) assert.Equal(t, hash2, testDB2.TemplateHash) assert.NotEqual(t, testDB1.ID, testDB2.ID) @@ -237,7 +239,7 @@ func 
TestPoolRemoveAll(t *testing.T) { // start using pool again p.InitHashPool(ctx, templateDB1, initFunc) assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) - testDB, err := p.GetTestDatabase(ctx, hash1, 0) + testDB, err := p.GetTestDatabase(ctx, hash1, 1*time.Second) assert.NoError(t, err) assert.Equal(t, 0, testDB.ID) } @@ -320,19 +322,16 @@ func TestPoolReturnTestDatabase(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 10, - PoolMaxParallelTasks: 3, + MaxPoolSize: 10, + PoolMaxParallelTasks: 3, + DisableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! } p := pool.NewPoolCollection(cfg) - t.Cleanup(func() { p.Stop() }) p.InitHashPool(ctx, templateDB1, initFunc) // add just one test DB require.NoError(t, p.AddTestDatabase(ctx, templateDB1)) - // stop the workers to prevent auto cleaning in background - p.Stop() - testDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) assert.NoError(t, err) diff --git a/pkg/templates/template_test.go b/pkg/templates/template_test.go index ef46708..3415383 100644 --- a/pkg/templates/template_test.go +++ b/pkg/templates/template_test.go @@ -28,7 +28,7 @@ func TestTemplateGetSetState(t *testing.T) { assert.Equal(t, templates.TemplateStateDiscarded, state) } -func TestTemplateWaitForReady(t *testing.T) { +func TestForReady(t *testing.T) { ctx := context.Background() goroutineNum := 10 @@ -58,7 +58,7 @@ func TestTemplateWaitForReady(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - timeout := 3 * time.Millisecond + timeout := 30 * time.Millisecond state := t1.WaitUntilFinalized(ctx, timeout) if state != templates.TemplateStateInit { errsChan <- errors.New(fmt.Sprintf("expected state %v (init), but is %v", templates.TemplateStateInit, state)) @@ -67,7 +67,7 @@ func TestTemplateWaitForReady(t *testing.T) { } // now set state - time.Sleep(5 * time.Millisecond) + time.Sleep(50 * time.Millisecond) t1.SetState(ctx, templates.TemplateStateFinalized) wg.Wait() From 
1283351b91a7907792c7f27f3af37195665c522f Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 29 Aug 2023 16:15:22 +0200 Subject: [PATCH 138/160] redesign recreate handling --- pkg/manager/manager.go | 28 ++---- pkg/manager/manager_test.go | 9 +- pkg/pool/pool.go | 188 +++++++++++++++++++----------------- 3 files changed, 117 insertions(+), 108 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 0e37bb8..1586123 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -331,7 +331,7 @@ func (m Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) er // check if the template exists and is finalized template, found := m.templates.Get(ctx, hash) if !found { - return m.dropDatabaseWithID(ctx, hash, id) + return ErrTemplateNotFound } if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != @@ -342,15 +342,10 @@ func (m Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) er // template is ready, we can return unchanged testDB to the pool if err := m.pool.ReturnTestDatabase(ctx, hash, id); err != nil { - if !(errors.Is(err, pool.ErrInvalidIndex) || - errors.Is(err, pool.ErrUnknownHash)) { - // other error is an internal error - return err - } - // db is not tracked in the pool - // try to drop it if exists - return m.dropDatabaseWithID(ctx, hash, id) + fmt.Printf("manager.ReturnTestDatabase error: %v\n", err) + + return err } return nil @@ -368,7 +363,7 @@ func (m *Manager) RecreateTestDatabase(ctx context.Context, hash string, id int) // check if the template exists and is finalized template, found := m.templates.Get(ctx, hash) if !found { - return m.dropDatabaseWithID(ctx, hash, id) + return ErrTemplateNotFound } if template.WaitUntilFinalized(ctx, m.config.TemplateFinalizeTimeout) != @@ -376,17 +371,12 @@ func (m *Manager) RecreateTestDatabase(ctx context.Context, hash string, id int) return ErrInvalidTemplateState } - // template is ready, we can returb the testDB to the pool and 
have it cleaned up + // template is ready, we can return the testDB to the pool and have it cleaned up if err := m.pool.RecreateTestDatabase(ctx, hash, id); err != nil { - if !(errors.Is(err, pool.ErrInvalidIndex) || - errors.Is(err, pool.ErrUnknownHash)) { - // other error is an internal error - return err - } - // db is not tracked in the pool - // try to drop it if exists - return m.dropDatabaseWithID(ctx, hash, id) + fmt.Printf("manager.RecreateTestDatabase error: %v\n", err) + + return err } return nil diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 9763cc6..4d2a614 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -675,6 +675,9 @@ func TestManagerGetAndRecreateTestDatabase(t *testing.T) { // request many more databases than initally added for i := 0; i <= cfg.TestDatabaseMaxPoolSize*2; i++ { test, err := m.GetTestDatabase(ctx, hash) + + t.Logf("open %v", test.ID) + assert.NoError(t, err) assert.NotEmpty(t, test) @@ -692,6 +695,8 @@ func TestManagerGetAndRecreateTestDatabase(t *testing.T) { require.NoError(t, err) assert.NoError(t, db.QueryRowContext(ctx, "SELECT COUNT(*) FROM pilots WHERE name = 'Anna'").Scan(&res)) assert.Equal(t, 1, res) + + t.Logf("close %v", test.ID) db.Close() // recreate testDB after usage @@ -882,8 +887,8 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { t.Fatalf("failed to manually create template database %q: %v", dbName, err) } - if err := m.ReturnTestDatabase(ctx, hash, id); err != nil { - t.Fatalf("failed to return manually created test database: %v", err) + if err := m.ReturnTestDatabase(ctx, hash, id); err == nil { + t.Fatalf("succeeded to return manually created test database: %v", err) // this should not work! } } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 99f1549..76696a5 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -43,9 +43,10 @@ const ( // HashPool holds a test DB pool for a certain hash. 
Each HashPool is running cleanup workers in background. type HashPool struct { - dbs []existingDB - ready chan int // ID of initalized DBs according to a template, ready to pick them up - dirty chan int // ID of DBs that were given away and need to be recreated to reuse them + dbs []existingDB + ready chan int // ID of initalized DBs according to a template, ready to pick them up + dirty chan int // ID of DBs that were given away and need to be recreated to reuse them + recreating chan struct{} // tracks currently running recreating ops recreateDB recreateTestDBFunc templateDB db.Database @@ -54,8 +55,9 @@ type HashPool struct { sync.RWMutex wg sync.WaitGroup - tasksChan chan string - running bool + tasksChan chan string + running bool + workerContext context.Context // the ctx all background workers will receive (nil if not yet started) } // NewHashPool creates new hash pool with the given config. @@ -67,9 +69,10 @@ func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFu } pool := &HashPool{ - dbs: make([]existingDB, 0, cfg.MaxPoolSize), - ready: make(chan int, cfg.MaxPoolSize), - dirty: make(chan int, cfg.MaxPoolSize), + dbs: make([]existingDB, 0, cfg.MaxPoolSize), + ready: make(chan int, cfg.MaxPoolSize), + dirty: make(chan int, cfg.MaxPoolSize), + recreating: make(chan struct{}, cfg.MaxPoolSize), recreateDB: makeActualRecreateTestDBFunc(templateDB.Config.Database, initDBFunc), templateDB: templateDB, @@ -91,6 +94,10 @@ func (pool *HashPool) Start() { } pool.running = true + + ctx, cancel := context.WithCancel(context.Background()) + pool.workerContext = ctx + for i := 0; i < pool.InitialPoolSize; i++ { pool.tasksChan <- workerTaskExtend } @@ -98,7 +105,7 @@ func (pool *HashPool) Start() { pool.wg.Add(1) go func() { defer pool.wg.Done() - pool.controlLoop() + pool.controlLoop(ctx, cancel) }() } @@ -112,6 +119,7 @@ func (pool *HashPool) Stop() { pool.tasksChan <- workerTaskStop pool.wg.Wait() + pool.workerContext = nil } func (pool 
*HashPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { @@ -162,12 +170,12 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout } // we try to ensure that InitialPoolSize count is staying ready - // thus, we try to move the oldest dirty dbs into cleaning - if len(pool.dbs) >= pool.PoolConfig.MaxPoolSize && len(pool.ready) < pool.InitialPoolSize { + // thus, we try to move the oldest dirty dbs into recreating with the workerTaskCleanDirty + if len(pool.dbs) >= pool.PoolConfig.MaxPoolSize && (len(pool.ready)+len(pool.recreating)) < pool.InitialPoolSize { pool.tasksChan <- workerTaskCleanDirty } - fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) return testDB.TestDatabase, nil } @@ -219,11 +227,11 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string } } -func (pool *HashPool) controlLoop() { +func (pool *HashPool) controlLoop(ctx context.Context, cancel context.CancelFunc) { fmt.Printf("pool#%s: controlLoop\n", pool.templateDB.TemplateHash) - ctx, cancel := context.WithCancel(context.Background()) + // ctx, cancel := context.WithCancel(context.Background()) defer cancel() workerTasksChan := make(chan string, len(pool.tasksChan)) @@ -272,7 +280,7 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id in pool.excludeIDFromDirtyChannel(id) pool.ready <- id - fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, 
dbs=%d initial=%d max=%d (ReturnTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (ReturnTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) return nil } @@ -282,7 +290,7 @@ func (pool *HashPool) excludeIDFromDirtyChannel(id int) { // The testDB identified by overgiven id may still in the dirty channel. We want to exclude it. // We need to explicitly remove it from there by filtering the current channel to a tmp channel. // We finally close the tmp channel and flush it onto the dirty channel again. - // The db is ready again. + // The id is now no longer in the channel. filteredDirty := make(chan int, pool.MaxPoolSize) var dirtyID int @@ -298,7 +306,7 @@ func (pool *HashPool) excludeIDFromDirtyChannel(id int) { } } - // filteredDirty now has all filtered values without the returned id, redirect the other back to the dirty channel. + // filteredDirty now has all filtered values without the above id, redirect the other ids back to the dirty channel. // close so we can range over it... close(filteredDirty) @@ -307,7 +315,7 @@ func (pool *HashPool) excludeIDFromDirtyChannel(id int) { } } -// RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. +// RecreateTestDatabase prioritizes the test DB to be recreated next via the dirty worker. 
func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id int) error { pool.RLock() @@ -316,39 +324,92 @@ func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id return ErrInvalidIndex } - // check if db is in the correct state - testDB := pool.dbs[id] + fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (RecreateTestDatabase %v)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize, id) pool.RUnlock() - if testDB.state == dbStateReady { + // exclude from the normal dirty channel, force recreation in a background worker... + pool.excludeIDFromDirtyChannel(id) + + // directly spawn a new worker in the bg (with the same ctx as the typical workers) + go pool.recreateDatabaseGracefully(pool.workerContext, id) + + return nil +} + +// recreateDatabaseGracefully continuosly tries to recreate the testdatabase and will retry/block until it succeeds +func (pool *HashPool) recreateDatabaseGracefully(ctx context.Context, id int) error { + + if ctx.Err() != nil { + // pool closed in the meantime. 
+ return ctx.Err() + } + + pool.RLock() + + if pool.dbs[id].state == dbStateReady { + // nothing to do + pool.RUnlock() return nil } - // state is dirty -> we will now recreate it - if err := pool.recreateDB(ctx, &testDB); err != nil { - return err + testDB := pool.dbs[id] + pool.RUnlock() + + pool.recreating <- struct{}{} + + defer func() { + <-pool.recreating + }() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + fmt.Printf("recreateDatabaseGracefully: recreating ID='%v'...\n", id) + err := pool.recreateDB(ctx, &testDB) + if err != nil { + fmt.Println(err) + if errors.Is(err, ErrTestDBInUse) { + fmt.Printf("recreateDatabaseGracefully: DB is still in use, will retry ID='%v'.\n", id) + time.Sleep(250 * time.Millisecond) // TODO make configurable and/or exponential retry backoff... + } else { + fmt.Printf("recreateDatabaseGracefully: db error while cleanup ID='%v' err=%v\n", id, err) + return nil // noop + } + } else { + goto MoveToReady + } + } } +MoveToReady: pool.Lock() defer pool.Unlock() - // change the state to 'ready' - testDB.state = dbStateReady - pool.dbs[id] = testDB + if ctx.Err() != nil { + // pool closed in the meantime. 
+ return ctx.Err() + } - // remove id from dirty and add it to ready channel - pool.excludeIDFromDirtyChannel(id) - pool.ready <- id + fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (recreateDatabaseGracefully %v)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize, id) - fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (RecreateTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + if pool.dbs[id].state == dbStateReady { + // oups, it has been cleaned by another worker already + // we won't add it to the 'ready' channel to avoid duplication + return nil + } - return nil + pool.dbs[id].state = dbStateReady + // pool.dbs[id] = testDB + pool.ready <- pool.dbs[id].ID + + return nil } // cleanDirty reads 'dirty' channel and cleans up a test DB with the received index. // When the DB is recreated according to a template, its index goes to the 'ready' channel. -// The function waits until there is a dirty DB... 
func (pool *HashPool) cleanDirty(ctx context.Context) error { ctx, task := trace.NewTask(ctx, "worker_clean_dirty") @@ -361,68 +422,25 @@ func (pool *HashPool) cleanDirty(ctx context.Context) error { return ctx.Err() default: // nothing to do - fmt.Println("cleanDirty noop", id) + fmt.Println("cleanDirty noop") return nil } fmt.Printf("pool#%s: cleanDirty %v\n", pool.templateDB.TemplateHash, id) regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") - pool.Lock() + pool.RLock() regLock.End() if id < 0 || id >= len(pool.dbs) { // sanity check, should never happen - pool.Unlock() + pool.RUnlock() return ErrInvalidIndex } - testDB := pool.dbs[id] - pool.Unlock() - - if testDB.state == dbStateReady { - // nothing to do - return nil - } - - reg := trace.StartRegion(ctx, "worker_db_operation") - err := pool.recreateDB(ctx, &testDB) - reg.End() - - if err != nil { - // fmt.Printf("worker_clean_dirty: failed to clean up DB ID='%v': %v\n", id, err) - - // we guarantee FIFO, we must keeping trying to clean up **exactly this** test database! 
- if errors.Is(err, ErrTestDBInUse) { - - fmt.Printf("worker_clean_dirty: scheduling retry cleanup for ID='%v'...\n", id) - time.Sleep(250 * time.Millisecond) - fmt.Printf("integworker_clean_dirtyresql: push DB ID='%v' into retry.", id) - pool.dirty <- id - pool.tasksChan <- workerTaskCleanDirty - return nil - } - - return err - } - - regLock = trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") - pool.Lock() - defer pool.Unlock() - regLock.End() - - if testDB.state == dbStateReady { - // oups, it has been cleaned by another worker already - // we won't add it to the 'ready' channel to avoid duplication - return nil - } - - testDB.state = dbStateReady - pool.dbs[id] = testDB - - pool.ready <- testDB.ID + // testDB := pool.dbs[id] + pool.RUnlock() - fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (cleanDirty)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) - return nil + return pool.recreateDatabaseGracefully(ctx, id) } func ignoreErrs(f func(ctx context.Context) error, errs ...error) func(context.Context) error { @@ -446,7 +464,7 @@ func (pool *HashPool) extend(ctx context.Context) error { pool.Lock() defer pool.Unlock() - fmt.Printf("pool#%s: ready=%d, dirty=%d, tasksChan=%d, dbs=%d initial=%d max=%d (extend)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (extend)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) reg.End() // get index of a next test DB - its ID @@ -490,8 +508,6 @@ func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) er // 
stop all workers pool.Stop() - // ! - // HashPool locked pool.Lock() defer pool.Unlock() @@ -517,6 +533,4 @@ func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) er close(pool.tasksChan) return nil - // HashPool unlocked - // ! } From a3b5be0a4700797206ad23dcf2a8fd910c23bf1a Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Wed, 30 Aug 2023 17:37:59 +0200 Subject: [PATCH 139/160] adds backoff handling for recreateDatabaseGracefully --- pkg/manager/manager.go | 29 +++++++++------------- pkg/manager/manager_config.go | 42 ++++++++++++++++---------------- pkg/manager/manager_test.go | 34 +++++++++++++------------- pkg/pool/pool.go | 32 +++++++++++++++--------- pkg/pool/pool_collection.go | 13 ++++++---- pkg/pool/pool_collection_test.go | 30 +++++++++++------------ 6 files changed, 94 insertions(+), 86 deletions(-) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 1586123..355fd1e 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -37,10 +37,12 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { if config.DatabasePrefix != "" { testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.DatabasePrefix) } - if config.TestDatabasePrefix != "" { - testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.TestDatabasePrefix) + if config.PoolConfig.TestDBNamePrefix != "" { + testDBPrefix = testDBPrefix + fmt.Sprintf("%s_", config.PoolConfig.TestDBNamePrefix) } + config.PoolConfig.TestDBNamePrefix = testDBPrefix + if len(config.TestDatabaseOwner) == 0 { config.TestDatabaseOwner = config.ManagerDatabaseConfig.Username } @@ -50,30 +52,23 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { } // at least one test database needs to be present initially - if config.TestDatabaseInitialPoolSize == 0 { - config.TestDatabaseInitialPoolSize = 1 + if config.PoolConfig.InitialPoolSize == 0 { + config.PoolConfig.InitialPoolSize = 1 } - if config.TestDatabaseInitialPoolSize > config.TestDatabaseMaxPoolSize && 
config.TestDatabaseMaxPoolSize > 0 { - config.TestDatabaseInitialPoolSize = config.TestDatabaseMaxPoolSize + if config.PoolConfig.InitialPoolSize > config.PoolConfig.MaxPoolSize && config.PoolConfig.MaxPoolSize > 0 { + config.PoolConfig.InitialPoolSize = config.PoolConfig.MaxPoolSize } - if config.PoolMaxParallelTasks < 1 { - config.PoolMaxParallelTasks = 1 + if config.PoolConfig.MaxParallelTasks < 1 { + config.PoolConfig.MaxParallelTasks = 1 } m := &Manager{ config: config, db: nil, templates: templates.NewCollection(), - pool: pool.NewPoolCollection( - pool.PoolConfig{ - MaxPoolSize: config.TestDatabaseMaxPoolSize, - InitialPoolSize: config.TestDatabaseInitialPoolSize, - TestDBNamePrefix: testDBPrefix, - PoolMaxParallelTasks: config.PoolMaxParallelTasks, - }, - ), + pool: pool.NewPoolCollection(config.PoolConfig), } return m, m.config @@ -139,7 +134,7 @@ func (m *Manager) Initialize(ctx context.Context) error { } } - rows, err := m.db.QueryContext(ctx, "SELECT datname FROM pg_database WHERE datname LIKE $1", fmt.Sprintf("%s_%s_%%", m.config.DatabasePrefix, m.config.TestDatabasePrefix)) + rows, err := m.db.QueryContext(ctx, "SELECT datname FROM pg_database WHERE datname LIKE $1", fmt.Sprintf("%s_%s_%%", m.config.DatabasePrefix, m.config.PoolConfig.TestDBNamePrefix)) if err != nil { return err } diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 435ec11..55b36b0 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -5,6 +5,7 @@ import ( "time" "github.com/allaboutapps/integresql/pkg/db" + "github.com/allaboutapps/integresql/pkg/pool" "github.com/allaboutapps/integresql/pkg/util" ) @@ -12,16 +13,14 @@ type ManagerConfig struct { ManagerDatabaseConfig db.DatabaseConfig TemplateDatabaseTemplate string - DatabasePrefix string - TemplateDatabasePrefix string - TestDatabasePrefix string - TestDatabaseOwner string - TestDatabaseOwnerPassword string - TestDatabaseInitialPoolSize int // Initial number 
of ready DBs prepared in background - TestDatabaseMaxPoolSize int // Maximal pool size that won't be exceeded - TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state - TestDatabaseGetTimeout time.Duration // Time to wait for a ready database - PoolMaxParallelTasks int // Maximal number of pool tasks running in parallel. Must be a number greater or equal 1. + DatabasePrefix string + TemplateDatabasePrefix string + TestDatabaseOwner string + TestDatabaseOwnerPassword string + TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state + TestDatabaseGetTimeout time.Duration // Time to wait for a ready database + + PoolConfig pool.PoolConfig } func DefaultManagerConfigFromEnv() ManagerConfig { @@ -50,18 +49,19 @@ func DefaultManagerConfigFromEnv() ManagerConfig { // DatabasePrefix_TemplateDatabasePrefix_HASH TemplateDatabasePrefix: util.GetEnv("INTEGRESQL_TEMPLATE_DB_PREFIX", "template"), - // DatabasePrefix_TestDatabasePrefix_HASH_ID - TestDatabasePrefix: util.GetEnv("INTEGRESQL_TEST_DB_PREFIX", "test"), - - // reuse the same user (PGUSER) and passwort (PGPASSWORT) for the test / template databases by default + // we reuse the same user (PGUSER) and passwort (PGPASSWORT) for the test / template databases by default TestDatabaseOwner: util.GetEnv("INTEGRESQL_TEST_PGUSER", util.GetEnv("INTEGRESQL_PGUSER", util.GetEnv("PGUSER", "postgres"))), TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), - // TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", 10), - TestDatabaseInitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU()), - // TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", 500), - TestDatabaseMaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4), - 
TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 5*60*1000 /*5 min*/)), - TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 1*60*1000 /*1 min, timeout hardcoded also in GET request handler*/)), - PoolMaxParallelTasks: util.GetEnvAsInt("INTEGRESQL_POOL_MAX_PARALLEL_TASKS", runtime.NumCPU()), + TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 5*60*1000 /*5 min*/)), + TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 1*60*1000 /*1 min, timeout hardcoded also in GET request handler*/)), + + PoolConfig: pool.PoolConfig{ + InitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU()), // previously default 10 + MaxPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_MAX_POOL_SIZE", runtime.NumCPU()*4), // previously default 500 + TestDBNamePrefix: util.GetEnv("INTEGRESQL_TEST_DB_PREFIX", "test"), // DatabasePrefix_TestDBNamePrefix_HASH_ID + MaxParallelTasks: util.GetEnvAsInt("INTEGRESQL_POOL_MAX_PARALLEL_TASKS", runtime.NumCPU()), + TestDatabaseRetryRecreateSleepMin: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MIN_MS", 250 /*250 ms*/)), + TestDatabaseRetryRecreateSleepMax: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MAX_MS", 1000*3 /*3 sec*/)), + }, } } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 4d2a614..045a5ca 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -310,8 +310,8 @@ func TestManagerGetTestDatabaseExtendPool(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() cfg.TestDatabaseGetTimeout = 300 * time.Millisecond - cfg.TestDatabaseInitialPoolSize = 0 // this will be autotransformed to 1 during init - cfg.TestDatabaseMaxPoolSize 
= 10 + cfg.PoolConfig.InitialPoolSize = 0 // this will be autotransformed to 1 during init + cfg.PoolConfig.MaxPoolSize = 10 m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -335,7 +335,7 @@ func TestManagerGetTestDatabaseExtendPool(t *testing.T) { previousID := -1 // assert than one by one pool will be extended - for i := 0; i < cfg.TestDatabaseMaxPoolSize; i++ { + for i := 0; i < cfg.PoolConfig.MaxPoolSize; i++ { testDB, err := m.GetTestDatabase(ctx, hash) assert.NoError(t, err) assert.Equal(t, previousID+1, testDB.ID) @@ -606,8 +606,8 @@ func TestManagerGetAndReturnTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseInitialPoolSize = 3 - cfg.TestDatabaseMaxPoolSize = 3 + cfg.PoolConfig.InitialPoolSize = 3 + cfg.PoolConfig.MaxPoolSize = 3 cfg.TestDatabaseGetTimeout = 200 * time.Millisecond m, _ := testManagerWithConfig(cfg) @@ -631,7 +631,7 @@ func TestManagerGetAndReturnTestDatabase(t *testing.T) { } // request many more databases than initally added - for i := 0; i <= cfg.TestDatabaseMaxPoolSize*3; i++ { + for i := 0; i <= cfg.PoolConfig.MaxPoolSize*3; i++ { test, err := m.GetTestDatabase(ctx, hash) assert.NoError(t, err) assert.NotEmpty(t, test) @@ -648,9 +648,9 @@ func TestManagerGetAndRecreateTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseInitialPoolSize = 4 - cfg.TestDatabaseMaxPoolSize = 8 - cfg.TestDatabaseGetTimeout = 250 * time.Millisecond + cfg.PoolConfig.InitialPoolSize = 4 + cfg.PoolConfig.MaxPoolSize = 8 + cfg.TestDatabaseGetTimeout = 500 * time.Millisecond m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -673,7 +673,7 @@ func TestManagerGetAndRecreateTestDatabase(t *testing.T) { } // request many more databases than initally added - for i := 0; i <= cfg.TestDatabaseMaxPoolSize*2; i++ { + for i := 0; i <= cfg.PoolConfig.MaxPoolSize*2; i++ { test, err 
:= m.GetTestDatabase(ctx, hash) t.Logf("open %v", test.ID) @@ -712,8 +712,8 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseInitialPoolSize = 5 - cfg.TestDatabaseMaxPoolSize = 5 + cfg.PoolConfig.InitialPoolSize = 5 + cfg.PoolConfig.MaxPoolSize = 5 cfg.TestDatabaseGetTimeout = time.Second m, _ := testManagerWithConfig(cfg) @@ -737,7 +737,7 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { } var wg sync.WaitGroup - for i := 0; i < cfg.TestDatabaseMaxPoolSize*5; i++ { + for i := 0; i < cfg.PoolConfig.MaxPoolSize*5; i++ { wg.Add(1) go func(i int) { defer wg.Done() @@ -782,8 +782,8 @@ func TestManagerReturnTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.TestDatabaseInitialPoolSize = 1 - cfg.TestDatabaseMaxPoolSize = 10 + cfg.PoolConfig.InitialPoolSize = 1 + cfg.PoolConfig.MaxPoolSize = 10 cfg.TestDatabaseGetTimeout = 200 * time.Millisecond m, _ := testManagerWithConfig(cfg) @@ -878,7 +878,7 @@ func TestManagerReturnUntrackedTemplateDatabase(t *testing.T) { } id := 321 - dbName := fmt.Sprintf("%s_%s_%s_%d", config.DatabasePrefix, config.TestDatabasePrefix, hash, id) + dbName := fmt.Sprintf("%s_%s_%s_%d", config.DatabasePrefix, config.PoolConfig.TestDBNamePrefix, hash, id) if _, err := db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil { t.Fatalf("failed to manually drop template database %q: %v", dbName, err) @@ -981,7 +981,7 @@ func TestManagerClearTrackedTestDatabases(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() // there are no db added in background - cfg.TestDatabaseInitialPoolSize = 0 + cfg.PoolConfig.InitialPoolSize = 0 m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 76696a5..8054bde 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -64,8 
+64,8 @@ type HashPool struct { // Starts the workers to extend the pool in background up to requested inital number. func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *HashPool { - if cfg.PoolMaxParallelTasks < minConcurrentTasksNum { - cfg.PoolMaxParallelTasks = minConcurrentTasksNum + if cfg.MaxParallelTasks < minConcurrentTasksNum { + cfg.MaxParallelTasks = minConcurrentTasksNum } pool := &HashPool{ @@ -184,7 +184,7 @@ func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB db.Databas return pool.extend(ctx) } -func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, poolMaxParallelTasks int) { +func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, MaxParallelTasks int) { fmt.Printf("pool#%s: workerTaskLoop\n", pool.templateDB.TemplateHash) @@ -194,7 +194,7 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string } // to limit the number of running goroutines. 
- var semaphore = make(chan struct{}, poolMaxParallelTasks) + var semaphore = make(chan struct{}, MaxParallelTasks) for task := range taskChan { handler, ok := handlers[task] @@ -238,7 +238,7 @@ func (pool *HashPool) controlLoop(ctx context.Context, cancel context.CancelFunc pool.wg.Add(1) go func() { defer pool.wg.Done() - pool.workerTaskLoop(ctx, workerTasksChan, pool.PoolMaxParallelTasks) + pool.workerTaskLoop(ctx, workerTasksChan, pool.MaxParallelTasks) }() for task := range pool.tasksChan { @@ -361,21 +361,31 @@ func (pool *HashPool) recreateDatabaseGracefully(ctx context.Context, id int) er <-pool.recreating }() + try := 0 + for { select { case <-ctx.Done(): return ctx.Err() default: - fmt.Printf("recreateDatabaseGracefully: recreating ID='%v'...\n", id) + try++ + + fmt.Printf("recreateDatabaseGracefully: recreating ID='%v' try=%v...\n", id, try) err := pool.recreateDB(ctx, &testDB) if err != nil { - fmt.Println(err) + // only still connected errors are worthy a retry if errors.Is(err, ErrTestDBInUse) { - fmt.Printf("recreateDatabaseGracefully: DB is still in use, will retry ID='%v'.\n", id) - time.Sleep(250 * time.Millisecond) // TODO make configurable and/or exponential retry backoff... 
+ + backoff := time.Duration(try) * pool.PoolConfig.TestDatabaseRetryRecreateSleepMin + if backoff > pool.PoolConfig.TestDatabaseRetryRecreateSleepMax { + backoff = pool.PoolConfig.TestDatabaseRetryRecreateSleepMax + } + + fmt.Printf("recreateDatabaseGracefully: DB is still in use, will retry ID='%v' try=%v in backoff=%v.\n", id, try, backoff) + time.Sleep(backoff) } else { - fmt.Printf("recreateDatabaseGracefully: db error while cleanup ID='%v' err=%v\n", id, err) - return nil // noop + fmt.Printf("recreateDatabaseGracefully: bailout worker task DB error while cleanup ID='%v' try=%v err=%v\n", id, try, err) + return err } } else { goto MoveToReady diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index b509e9a..b8e35fd 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -14,11 +14,14 @@ import ( var ErrUnknownHash = errors.New("no database pool exists for this hash") type PoolConfig struct { - MaxPoolSize int - InitialPoolSize int - TestDBNamePrefix string - PoolMaxParallelTasks int - DisableWorkerAutostart bool + InitialPoolSize int // Initial number of ready DBs prepared in background + MaxPoolSize int // Maximal pool size that won't be exceeded + TestDBNamePrefix string // Test-Database prefix: DatabasePrefix_TestDBNamePrefix_HASH_ID + MaxParallelTasks int // Maximal number of pool tasks running in parallel. Must be a number greater or equal 1. + TestDatabaseRetryRecreateSleepMin time.Duration // Minimal time to wait after a test db recreate has failed (e.g. as client is still connected). Subsequent retries multiply this values until... + TestDatabaseRetryRecreateSleepMax time.Duration // ... the maximum possible sleep time between retries (e.g. 3 seconds) is reached. 
+ + DisableWorkerAutostart bool // test only flag for starting without background worker task system } type PoolCollection struct { diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index 465c515..00eafca 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -18,7 +18,7 @@ func TestPoolAddGet(t *testing.T) { ctx := context.Background() cfg := pool.PoolConfig{ MaxPoolSize: 2, - PoolMaxParallelTasks: 4, + MaxParallelTasks: 4, TestDBNamePrefix: "prefix_", DisableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! } @@ -94,10 +94,10 @@ func TestPoolAddGetConcurrent(t *testing.T) { maxPoolSize := 15 cfg := pool.PoolConfig{ - MaxPoolSize: maxPoolSize, - InitialPoolSize: maxPoolSize, - PoolMaxParallelTasks: 4, - TestDBNamePrefix: "", + MaxPoolSize: maxPoolSize, + InitialPoolSize: maxPoolSize, + MaxParallelTasks: 4, + TestDBNamePrefix: "", } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) @@ -149,9 +149,9 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 40, - PoolMaxParallelTasks: 4, - TestDBNamePrefix: "", + MaxPoolSize: 40, + MaxParallelTasks: 4, + TestDBNamePrefix: "", } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) @@ -212,8 +212,8 @@ func TestPoolRemoveAll(t *testing.T) { } cfg := pool.PoolConfig{ - MaxPoolSize: 6, - PoolMaxParallelTasks: 4, + MaxPoolSize: 6, + MaxParallelTasks: 4, } p := pool.NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) @@ -263,10 +263,10 @@ func TestPoolReuseDirty(t *testing.T) { maxPoolSize := 40 cfg := pool.PoolConfig{ - MaxPoolSize: maxPoolSize, - InitialPoolSize: maxPoolSize, - PoolMaxParallelTasks: 1, - TestDBNamePrefix: "test_", + MaxPoolSize: maxPoolSize, + InitialPoolSize: maxPoolSize, + MaxParallelTasks: 1, + TestDBNamePrefix: "test_", } p := pool.NewPoolCollection(cfg) @@ -323,7 +323,7 @@ func TestPoolReturnTestDatabase(t *testing.T) { cfg := pool.PoolConfig{ 
MaxPoolSize: 10, - PoolMaxParallelTasks: 3, + MaxParallelTasks: 3, DisableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! } p := pool.NewPoolCollection(cfg) From 5bcf409de0f413072346500d5921248b07fadef3 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Wed, 30 Aug 2023 19:44:35 +0200 Subject: [PATCH 140/160] adds test database generations and TestDatabaseMinimalLifetime (blocks auto clean for specific time on issued databases) to properly deal with pressure caused by fast test db issuance and out-of-order recreates which might cause interferances --- pkg/manager/manager_config.go | 1 + pkg/manager/manager_test.go | 8 +-- pkg/pool/pool.go | 86 ++++++++++++++++++++++++-------- pkg/pool/pool_collection.go | 1 + pkg/pool/pool_collection_test.go | 2 +- 5 files changed, 73 insertions(+), 25 deletions(-) diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 55b36b0..4053003 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -62,6 +62,7 @@ func DefaultManagerConfigFromEnv() ManagerConfig { MaxParallelTasks: util.GetEnvAsInt("INTEGRESQL_POOL_MAX_PARALLEL_TASKS", runtime.NumCPU()), TestDatabaseRetryRecreateSleepMin: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MIN_MS", 250 /*250 ms*/)), TestDatabaseRetryRecreateSleepMax: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MAX_MS", 1000*3 /*3 sec*/)), + TestDatabaseMinimalLifetime: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_MINIMAL_LIFETIME_MS", 250 /*250 ms*/)), }, } } diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 045a5ca..676bb43 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -648,9 +648,9 @@ func TestManagerGetAndRecreateTestDatabase(t *testing.T) { ctx := context.Background() cfg := manager.DefaultManagerConfigFromEnv() - cfg.PoolConfig.InitialPoolSize = 4 + 
cfg.PoolConfig.InitialPoolSize = 8 cfg.PoolConfig.MaxPoolSize = 8 - cfg.TestDatabaseGetTimeout = 500 * time.Millisecond + cfg.TestDatabaseGetTimeout = 1000 * time.Millisecond m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { @@ -673,7 +673,7 @@ func TestManagerGetAndRecreateTestDatabase(t *testing.T) { } // request many more databases than initally added - for i := 0; i <= cfg.PoolConfig.MaxPoolSize*2; i++ { + for i := 0; i <= cfg.PoolConfig.MaxPoolSize*5; i++ { test, err := m.GetTestDatabase(ctx, hash) t.Logf("open %v", test.ID) @@ -714,7 +714,7 @@ func TestManagerGetTestDatabaseDontReturn(t *testing.T) { cfg := manager.DefaultManagerConfigFromEnv() cfg.PoolConfig.InitialPoolSize = 5 cfg.PoolConfig.MaxPoolSize = 5 - cfg.TestDatabaseGetTimeout = time.Second + cfg.TestDatabaseGetTimeout = time.Second * 5 m, _ := testManagerWithConfig(cfg) if err := m.Initialize(ctx); err != nil { diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 8054bde..6b82a00 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -22,8 +22,9 @@ var ( type dbState int // Indicates a current DB state. const ( - dbStateReady dbState = iota // Initialized according to a template and ready to be picked up. - dbStateDirty // Taken by a client and potentially currently in use. + dbStateReady dbState = iota // Initialized according to a template and ready to be picked up. + dbStateDirty // Taken by a client and potentially currently in use. + dbStateRecreating // In the process of being recreated (to prevent concurrent cleans) ) const minConcurrentTasksNum = 1 @@ -31,14 +32,24 @@ const minConcurrentTasksNum = 1 type existingDB struct { state dbState db.TestDatabase + + // To prevent auto-cleans of a testdatabase on the dirty channel directly after it was issued as ready, + // each testdatabase gets a timestamp assigned after which auto-cleaning it generally allowed (unlock + // and recreate do not respect this). 
This timeout is typically very low and should only be neccessary + // to be tweaked in scenarios in which the pool is overloaded by requests. + // Prefer to tweak InitialPoolSize (the always ready dbs) and MaxPoolSize instead if you have issues here. + blockAutoCleanDirtyUntil time.Time + + // increased after each recreation, useful for sleepy recreating workers to check if we still operate on the same gen. + generation uint } type workerTask string const ( - workerTaskStop = "STOP" - workerTaskExtend = "EXTEND" - workerTaskCleanDirty = "CLEAN_DIRTY" + workerTaskStop = "STOP" + workerTaskExtend = "EXTEND" + workerTaskAutoCleanDirty = "CLEAN_DIRTY" ) // HashPool holds a test DB pool for a certain hash. Each HashPool is running cleanup workers in background. @@ -160,7 +171,10 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout return } + // flag as dirty and block auto clean until testDB.state = dbStateDirty + testDB.blockAutoCleanDirtyUntil = time.Now().Add(pool.TestDatabaseMinimalLifetime) + pool.dbs[index] = testDB pool.dirty <- index @@ -170,9 +184,9 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout } // we try to ensure that InitialPoolSize count is staying ready - // thus, we try to move the oldest dirty dbs into recreating with the workerTaskCleanDirty + // thus, we try to move the oldest dirty dbs into recreating with the workerTaskAutoCleanDirty if len(pool.dbs) >= pool.PoolConfig.MaxPoolSize && (len(pool.ready)+len(pool.recreating)) < pool.InitialPoolSize { - pool.tasksChan <- workerTaskCleanDirty + pool.tasksChan <- workerTaskAutoCleanDirty } fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) @@ -189,8 +203,8 @@ func (pool *HashPool) 
workerTaskLoop(ctx context.Context, taskChan <-chan string fmt.Printf("pool#%s: workerTaskLoop\n", pool.templateDB.TemplateHash) handlers := map[string]func(ctx context.Context) error{ - workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), - workerTaskCleanDirty: ignoreErrs(pool.cleanDirty, context.Canceled), + workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), + workerTaskAutoCleanDirty: ignoreErrs(pool.autoCleanDirty, context.Canceled), } // to limit the number of running goroutines. @@ -268,7 +282,7 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id in // check if db is in the correct state testDB := pool.dbs[id] - if testDB.state == dbStateReady { + if testDB.state != dbStateDirty { return nil } @@ -344,16 +358,21 @@ func (pool *HashPool) recreateDatabaseGracefully(ctx context.Context, id int) er return ctx.Err() } - pool.RLock() + pool.Lock() - if pool.dbs[id].state == dbStateReady { + if pool.dbs[id].state != dbStateDirty { // nothing to do - pool.RUnlock() + pool.Unlock() return nil } testDB := pool.dbs[id] - pool.RUnlock() + + // set state recreating... + pool.dbs[id].state = dbStateRecreating + pool.dbs[id] = testDB + + pool.Unlock() pool.recreating <- struct{}{} @@ -410,17 +429,19 @@ MoveToReady: return nil } + // increase the generation of the testdb (as we just recreated it) and move into ready! + pool.dbs[id].generation++ pool.dbs[id].state = dbStateReady - // pool.dbs[id] = testDB pool.ready <- pool.dbs[id].ID return nil } -// cleanDirty reads 'dirty' channel and cleans up a test DB with the received index. +// autoCleanDirty reads 'dirty' channel and cleans up a test DB with the received index. // When the DB is recreated according to a template, its index goes to the 'ready' channel. -func (pool *HashPool) cleanDirty(ctx context.Context) error { +// Note that we generally gurantee FIFO when it comes to auto-cleaning as long as no manual unlock/recreates happen. 
+func (pool *HashPool) autoCleanDirty(ctx context.Context) error { ctx, task := trace.NewTask(ctx, "worker_clean_dirty") defer task.End() @@ -432,11 +453,11 @@ func (pool *HashPool) cleanDirty(ctx context.Context) error { return ctx.Err() default: // nothing to do - fmt.Println("cleanDirty noop") + fmt.Println("autoCleanDirty noop") return nil } - fmt.Printf("pool#%s: cleanDirty %v\n", pool.templateDB.TemplateHash, id) + fmt.Printf("pool#%s: autoCleanDirty id=%v\n", pool.templateDB.TemplateHash, id) regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") pool.RLock() @@ -447,7 +468,32 @@ func (pool *HashPool) cleanDirty(ctx context.Context) error { pool.RUnlock() return ErrInvalidIndex } - // testDB := pool.dbs[id] + + blockedUntil := time.Until(pool.dbs[id].blockAutoCleanDirtyUntil) + generation := pool.dbs[id].generation + + pool.RUnlock() + + // immediately pass to pool recreate + if blockedUntil <= 0 { + return pool.recreateDatabaseGracefully(ctx, id) + } + + // else we need to wait until we are allowed to work with it! + // we block auto-cleaning until we are allowed to... 
+ fmt.Printf("pool#%s: autoCleanDirty id=%v sleep for blockedUntil=%v...\n", pool.templateDB.TemplateHash, id, blockedUntil) + time.Sleep(blockedUntil) + + // we need to check that the testDB.generation did not change since we slept + // (which would indicate that the database was already unlocked/recreated by someone else in the meantime) + pool.RLock() + + if pool.dbs[id].generation != generation || pool.dbs[id].state != dbStateDirty { + fmt.Printf("pool#%s: autoCleanDirty id=%v bailout old generation=%v vs new generation=%v state=%v...\n", pool.templateDB.TemplateHash, id, generation, pool.dbs[id].generation, pool.dbs[id].state) + pool.RUnlock() + return nil + } + pool.RUnlock() return pool.recreateDatabaseGracefully(ctx, id) diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index b8e35fd..1a91a98 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -20,6 +20,7 @@ type PoolConfig struct { MaxParallelTasks int // Maximal number of pool tasks running in parallel. Must be a number greater or equal 1. TestDatabaseRetryRecreateSleepMin time.Duration // Minimal time to wait after a test db recreate has failed (e.g. as client is still connected). Subsequent retries multiply this values until... TestDatabaseRetryRecreateSleepMax time.Duration // ... the maximum possible sleep time between retries (e.g. 3 seconds) is reached. + TestDatabaseMinimalLifetime time.Duration // After a testdatabase transitions from ready to dirty, always block auto-recreation for this duration (except manual recreate). 
DisableWorkerAutostart bool // test only flag for starting without background worker task system } diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_test.go index 00eafca..90c0c05 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_test.go @@ -274,7 +274,7 @@ func TestPoolReuseDirty(t *testing.T) { t.Cleanup(func() { p.Stop() }) getDirty := func(seenIDMap *sync.Map) { - newTestDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, 1*time.Second) + newTestDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, 3*time.Second) assert.NoError(t, err) seenIDMap.Store(newTestDB1.ID, true) } From 96dd1c3f9c7fe98cea427a8260e0c87d015195c7 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 4 Sep 2023 15:41:29 +0200 Subject: [PATCH 141/160] fix toolchain, reenable pipefails, fix linting, cleanup --- .devcontainer/devcontainer.json | 39 +++------- .drone.yml | 2 +- .golangci.yml | 29 +++++++ Dockerfile | 77 ++++++++++++++---- Makefile | 28 ++++--- cmd/server/main.go | 8 +- docker-compose.yml | 34 +++++++- internal/api/server.go | 1 + internal/api/templates/templates.go | 78 ++++++++++--------- internal/config/build_args.go | 18 +++++ ...st.go => database_config_internal_test.go} | 0 pkg/manager/helpers_test.go | 4 +- pkg/manager/manager.go | 22 +----- pkg/manager/manager_config.go | 3 +- pkg/manager/manager_test.go | 9 ++- pkg/pool/pool.go | 32 +++++--- pkg/pool/pool_collection.go | 41 +++++----- ...st.go => pool_collection_internal_test.go} | 57 +++++++------- pkg/templates/template.go | 14 ++-- pkg/templates/template_collection.go | 2 +- pkg/templates/template_test.go | 5 +- pkg/util/retry.go | 2 +- tests/testclient/client.go | 5 +- 23 files changed, 308 insertions(+), 202 deletions(-) create mode 100644 .golangci.yml create mode 100644 internal/config/build_args.go rename pkg/db/{database_config_test.go => database_config_internal_test.go} (100%) rename pkg/pool/{pool_collection_test.go => 
pool_collection_internal_test.go} (86%) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index a8f6bd0..7e268fd 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -14,9 +14,11 @@ // The optional 'workspaceFolder' property is the path VS Code should open by default when // connected. This is typically a file mount in .devcontainer/docker-compose.yml "workspaceFolder": "/app", - // Set *default* container specific settings.json values on container create. + // All containers should stop if we close / reload the VSCode window. + "shutdownAction": "stopCompose", "customizations": { "vscode": { + // Set *default* container specific settings.json values on container create. "settings": { // https://github.com/golang/tools/blob/master/gopls/doc/vscode.md#vscode "go.useLanguageServer": true, @@ -45,6 +47,8 @@ // DISABLED, done via "staticcheck": false, }, + // https://code.visualstudio.com/docs/languages/go#_intellisense + "go.autocompleteUnimportedPackages": true, // https://github.com/golangci/golangci-lint#editor-integration "go.lintTool": "golangci-lint", "go.lintFlags": [ @@ -69,30 +73,6 @@ }, // ensure that the pgFormatter VSCode extension uses the pgFormatter that comes preinstalled in the Dockerfile "pgFormatter.pgFormatterPath": "/usr/local/bin/pg_format" - // "go.lintOnSave": "workspace" - // general build settings in sync with our makefile - // "go.buildFlags": [ - // "-o", - // "bin/app" - // ] - // "sqltools.connections": [ - // { - // "database": "sample", - // "dialect": "PostgreSQL", - // "name": "postgres", - // "password": "9bed16f749d74a3c8bfbced18a7647f5", - // "port": 5432, - // "server": "postgres", - // "username": "dbuser" - // } - // ], - // "sqltools.autoConnectTo": [ - // "postgres" - // ], - // // only use pg_format to actually format! 
- // "sqltools.formatLanguages": [], - // "sqltools.telemetry": false, - // "sqltools.autoOpenSessionFiles": false }, // Add the IDs of extensions you want installed when the container is created. "extensions": [ @@ -100,12 +80,12 @@ "golang.go", "bradymholt.pgformatter", // optional: - // "766b.go-outliner", + "42crunch.vscode-openapi", "heaths.vscode-guid", "bungcip.better-toml", "eamodio.gitlens", - "casualjim.gotemplate" - // "mtxr.sqltools", + "casualjim.gotemplate", + "yzhang.markdown-all-in-one" ] } }, @@ -115,6 +95,7 @@ // "shutdownAction": "none", // Uncomment the next line to run commands after the container is created - for example installing git. "postCreateCommand": "go version", + // "postCreateCommand": "apt-get update && apt-get install -y git", // Uncomment to connect as a non-root user. See https://aka.ms/vscode-remote/containers/non-root. - "remoteUser": "development" + // "remoteUser": "" } \ No newline at end of file diff --git a/.drone.yml b/.drone.yml index 0a8deee..ec24175 100644 --- a/.drone.yml +++ b/.drone.yml @@ -133,7 +133,7 @@ pipeline: environment: IMAGE_TAG: *IMAGE_BUILDER_ID commands: - - "docker build --target builder-integresql --compress -t $${IMAGE_TAG} ." + - "docker build --target builder --compress -t $${IMAGE_TAG} ." <<: *WHEN_BUILD_EVENT "docker build (target integresql)": diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..af28163 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,29 @@ +linters: + enable: + # https://github.com/golangci/golangci-lint#enabled-by-default-linters + # Additional linters you want to activate may be specified here... 
+ + # --- + # https://github.com/mgechev/revive + # replacement for the now deprecated official golint linter, see https://github.com/golang/go/issues/38968 + - revive + + # --- + # https://github.com/maratori/testpackage + # used to enforce blackbox testing + - testpackage + + # --- + # https://github.com/securego/gosec + # inspects source code for security problems by scanning the Go AST. + - gosec + + # --- + # https://github.com/sivchari/tenv + # prefer t.Setenv instead of os.Setenv within test code. + - tenv + + # --- + # https://github.com/polyfloyd/go-errorlint + # ensure we are comparing errors via errors.Is, types/values via errors.As and wrap errors with %w. + - errorlint diff --git a/Dockerfile b/Dockerfile index 6d23916..b7d3416 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,23 +18,55 @@ ENV MAKEFLAGS "-j 8 --no-print-directory" # e.g. stretch=>stretch-pgdg, buster=>buster-pgdg, bullseye=>bullseye-pgdg RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ bullseye-pgdg main" \ | tee /etc/apt/sources.list.d/pgdg.list \ - && apt install curl ca-certificates gnupg \ - && curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /etc/apt/trusted.gpg.d/apt.postgresql.org.gpg >/dev/null - + && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc \ + | apt-key add - # Install required system dependencies RUN apt-get update \ && apt-get install -y \ + # + # Mandadory minimal linux packages + # Installed at development stage and app stage + # Do not forget to add mandadory linux packages to the final app Dockerfile stage below! 
+ # + # -- START MANDADORY -- + ca-certificates \ + # --- END MANDADORY --- + # + # Development specific packages + # Only installed at development stage and NOT available in the final Docker stage + # based upon + # https://github.com/microsoft/vscode-remote-try-go/blob/master/.devcontainer/Dockerfile + # https://raw.githubusercontent.com/microsoft/vscode-dev-containers/master/script-library/common-debian.sh + # + # icu-devtools: https://stackoverflow.com/questions/58736399/how-to-get-vscode-liveshare-extension-working-when-running-inside-vscode-remote + # graphviz: https://github.com/google/pprof#building-pprof + # -- START DEVELOPMENT -- + apt-utils \ + dialog \ + openssh-client \ + less \ + iproute2 \ + procps \ + lsb-release \ locales \ sudo \ bash-completion \ bsdmainutils \ + graphviz \ + xz-utils \ postgresql-client-12 \ + icu-devtools \ + tmux \ + rsync \ + # --- END DEVELOPMENT --- + # && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# vscode support: LANG must be supported, requires installing the locale package first -# see https://github.com/Microsoft/vscode/issues/58015 +# env/vscode support: LANG must be supported, requires installing the locale package first +# https://github.com/Microsoft/vscode/issues/58015 +# https://stackoverflow.com/questions/28405902/how-to-set-the-locale-inside-a-debian-ubuntu-docker-container RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 @@ -82,6 +114,25 @@ RUN ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \ # https://github.com/uw-labs/lichen/tags RUN go install github.com/uw-labs/lichen@v0.1.7 +# watchexec +# https://github.com/watchexec/watchexec/releases +RUN mkdir -p /tmp/watchexec \ + && cd /tmp/watchexec \ + && wget https://github.com/watchexec/watchexec/releases/download/v1.20.6/watchexec-1.20.6-$(arch)-unknown-linux-musl.tar.xz \ + && tar xf 
watchexec-1.20.6-$(arch)-unknown-linux-musl.tar.xz \ + && cp watchexec-1.20.6-$(arch)-unknown-linux-musl/watchexec /usr/local/bin/watchexec \ + && rm -rf /tmp/watchexec + +# yq +# https://github.com/mikefarah/yq/releases +RUN mkdir -p /tmp/yq \ + && cd /tmp/yq \ + && ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \ + && wget "https://github.com/mikefarah/yq/releases/download/v4.30.5/yq_linux_${ARCH}.tar.gz" \ + && tar xzf "yq_linux_${ARCH}.tar.gz" \ + && cp "yq_linux_${ARCH}" /usr/local/bin/yq \ + && rm -rf /tmp/yq + # linux permissions / vscode support: Add user to avoid linux file permission issues # Detail: Inside the container, any mounted files/folders will have the exact same permissions # as outside the container - including the owner user ID (UID) and group ID (GID). @@ -100,7 +151,6 @@ RUN groupadd --gid $USER_GID $USERNAME \ && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ && chmod 0440 /etc/sudoers.d/$USERNAME - # vscode support: cached extensions install directory # https://code.visualstudio.com/docs/remote/containers-advanced#_avoiding-extension-reinstalls-on-container-rebuild RUN mkdir -p /home/$USERNAME/.vscode-server/extensions \ @@ -113,7 +163,6 @@ RUN mkdir -p /home/$USERNAME/.vscode-server/extensions \ # Note that this should be the final step after installing all build deps RUN mkdir -p /$GOPATH/pkg && chown -R $USERNAME /$GOPATH - # $GOBIN is where our own compiled binaries will live and other go.mod / VSCode binaries will be installed. # It should always come AFTER our other $PATH segments and should be earliest targeted in stage "builder", # as /app/bin will the shadowed by a volume mount via docker-compose! @@ -133,15 +182,11 @@ COPY Makefile /app/Makefile COPY go.mod /app/go.mod COPY go.sum /app/go.sum COPY tools.go /app/tools.go -RUN make modules && make tools +RUN make modules +COPY tools.go /app/tools.go +RUN make tools COPY . 
/app/ - -### ----------------------- -# --- Stage: builder-integresql -### ----------------------- - -FROM builder as builder-integresql -RUN make build +RUN make go-build ### ----------------------- # --- Stage: integresql @@ -152,7 +197,7 @@ RUN make build # The :debug image provides a busybox shell to enter. # https://github.com/GoogleContainerTools/distroless#debug-images FROM gcr.io/distroless/base-debian11:debug as integresql -COPY --from=builder-integresql /app/bin/integresql / +COPY --from=builder /app/bin/integresql / # Note that cmd is not supported with these kind of images, no shell included # see https://github.com/GoogleContainerTools/distroless/issues/62 # and https://github.com/GoogleContainerTools/distroless#entrypoints diff --git a/Makefile b/Makefile index 8ce960f..4e4701c 100644 --- a/Makefile +++ b/Makefile @@ -4,8 +4,8 @@ # first is default target when running "make" without args build: ##- Default 'make' target: go-format, go-build and lint. - @$(MAKE) format - @$(MAKE) gobuild + @$(MAKE) go-format + @$(MAKE) go-build @$(MAKE) lint # useful to ensure that everything gets resetuped from scratch @@ -22,14 +22,16 @@ info-go: ##- (opt) Prints go.mod updates, module-name and current go version. @go version >> tmp/.info-go @cat tmp/.info-go -format: - go fmt +lint: go-lint ##- Runs golangci-lint and make check-*. -gobuild: - go build -o bin/integresql ./cmd/server +go-format: ##- (opt) Runs go format. + go fmt ./... -lint: - golangci-lint run --fast +go-build: ##- (opt) Runs go build. + go build -ldflags $(LDFLAGS) -o bin/integresql ./cmd/server + +go-lint: ##- (opt) Runs golangci-lint. + golangci-lint run --timeout 5m bench: ##- Run tests, output by package, print coverage. @go test -benchmem=false -run=./... -bench . 
github.com/allaboutapps/integresql/tests -race -count=4 -v @@ -179,10 +181,6 @@ LDFLAGS = $(eval LDFLAGS := "\ # required to ensure make fails if one recipe fails (even on parallel jobs) and on pipefails .ONESHELL: -# # normal POSIX bash shell mode -# SHELL = /bin/bash -# .SHELLFLAGS = -cEeuo pipefail - -# wrapped make time tracing shell, use it via MAKE_TRACE_TIME=true make -# SHELL = /bin/rksh -# .SHELLFLAGS = $@ \ No newline at end of file +# normal POSIX bash shell mode +SHELL = /bin/bash +.SHELLFLAGS = -cEeuo pipefail diff --git a/cmd/server/main.go b/cmd/server/main.go index f23b3aa..3399fd5 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -2,6 +2,8 @@ package main import ( "context" + "errors" + "fmt" "log" "net/http" "os" @@ -10,10 +12,14 @@ import ( "time" "github.com/allaboutapps/integresql/internal/api" + "github.com/allaboutapps/integresql/internal/config" "github.com/allaboutapps/integresql/internal/router" ) func main() { + + fmt.Println(config.GetFormattedBuildArgs()) + s := api.DefaultServerFromEnv() if err := s.InitManager(context.Background()); err != nil { @@ -35,7 +41,7 @@ func main() { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - if err := s.Shutdown(ctx); err != nil && err != http.ErrServerClosed { + if err := s.Shutdown(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { log.Fatalf("Failed to gracefully shut down server: %v", err) } } diff --git a/docker-compose.yml b/docker-compose.yml index 9f2de21..70acc3e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,9 +8,23 @@ services: # ports: # - "5000:5000" working_dir: /app + # linux permissions / vscode support: we must explicitly run as the development user + user: development volumes: - - .:/app #:delegated - # - ./.pkg:/go/pkg # enable this to reuse the pkg cache + # mount working directory + # 
https://code.visualstudio.com/docs/remote/containers-advanced#_update-the-mount-consistency-to-delegated-for-macos + # https://docs.docker.com/docker-for-mac/osxfs-caching/#delegated + # the container’s view is authoritative (permit delays before updates on the container appear in the host) + - .:/app:delegated + + # mount cached go pkg downloads + - go-pkg:/go/pkg + + # mount cached vscode container extensions + # https://code.visualstudio.com/docs/remote/containers-advanced#_avoiding-extension-reinstalls-on-container-rebuild + - vscode-extensions:/home/development/.vscode-server/extensions + - vscode-extensions-insiders:/home/development/.vscode-server-insiders/extensions + depends_on: - postgres environment: &SERVICE_ENV @@ -28,7 +42,13 @@ services: - seccomp:unconfined # Overrides default command so things don't shut down after the process ends. - command: /bin/sh -c "while sleep 1000; do :; done" + # Overrides default command so things don't shut down after the process ends. + command: + - /bin/sh + - -c + - | + git config --global --add safe.directory /app + while sleep 1000; do :; done postgres: image: postgres:12.4-alpine # should be the same version as used in .drone.yml, Dockerfile and live @@ -46,3 +66,11 @@ services: volumes: pgvolume: # declare a named volume to persist DB data + + # go: go mod cached downloads + go-pkg: + + # vscode: Avoiding extension reinstalls on container rebuild + # https://code.visualstudio.com/docs/remote/containers-advanced#_avoiding-extension-reinstalls-on-container-rebuild + vscode-extensions: + vscode-extensions-insiders: diff --git a/internal/api/server.go b/internal/api/server.go index 23f6450..e4a4689 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -8,6 +8,7 @@ import ( "net" "time" + // #nosec G108 - pprof handlers (conditionally made available via http.DefaultServeMux within router) _ "net/http/pprof" "github.com/allaboutapps/integresql/pkg/manager" diff --git 
a/internal/api/templates/templates.go b/internal/api/templates/templates.go index f32446a..a81d1db 100644 --- a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -2,6 +2,7 @@ package templates import ( "context" + "errors" "net/http" "strconv" "time" @@ -33,14 +34,14 @@ func postInitializeTemplate(s *api.Server) echo.HandlerFunc { template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash) if err != nil { - switch err { - case manager.ErrManagerNotReady: + if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable - case manager.ErrTemplateAlreadyInitialized: + } else if errors.Is(err, manager.ErrTemplateAlreadyInitialized) { return echo.NewHTTPError(http.StatusLocked, "template is already initialized") - default: - return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } + + // default 500 + return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } return c.JSON(http.StatusOK, &template) @@ -55,17 +56,17 @@ func putFinalizeTemplate(s *api.Server) echo.HandlerFunc { defer cancel() if _, err := s.Manager.FinalizeTemplateDatabase(ctx, hash); err != nil { - switch err { - case manager.ErrTemplateAlreadyInitialized: + if errors.Is(err, manager.ErrTemplateAlreadyInitialized) { // template is initialized, we ignore this error return c.NoContent(http.StatusNoContent) - case manager.ErrManagerNotReady: + } else if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable - case manager.ErrTemplateNotFound: + } else if errors.Is(err, manager.ErrTemplateNotFound) { return echo.NewHTTPError(http.StatusNotFound, "template not found") - default: - return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } + + // default 500 + return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } return c.NoContent(http.StatusNoContent) @@ -80,14 +81,14 @@ func deleteDiscardTemplate(s *api.Server) echo.HandlerFunc { defer cancel() if err := 
s.Manager.DiscardTemplateDatabase(ctx, hash); err != nil { - switch err { - case manager.ErrManagerNotReady: + if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable - case manager.ErrTemplateNotFound: + } else if errors.Is(err, manager.ErrTemplateNotFound) { return echo.NewHTTPError(http.StatusNotFound, "template not found") - default: - return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } + + // default 500 + return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } return c.NoContent(http.StatusNoContent) @@ -103,24 +104,24 @@ func getTestDatabase(s *api.Server) echo.HandlerFunc { test, err := s.Manager.GetTestDatabase(ctx, hash) if err != nil { - switch err { - case manager.ErrManagerNotReady: + + if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable - case manager.ErrTemplateNotFound: + } else if errors.Is(err, manager.ErrTemplateNotFound) { return echo.NewHTTPError(http.StatusNotFound, "template not found") - case manager.ErrTemplateDiscarded: + } else if errors.Is(err, manager.ErrTemplateDiscarded) { return echo.NewHTTPError(http.StatusGone, "template was just discarded") - case pool.ErrPoolFull: - return echo.NewHTTPError(http.StatusInsufficientStorage, "pool is full and can't be extended") - default: - return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } + + // default 500 + return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } return c.JSON(http.StatusOK, &test) } } +// deprecated func deleteReturnTestDatabase(s *api.Server) echo.HandlerFunc { return postUnlockTestDatabase(s) } @@ -137,18 +138,18 @@ func postUnlockTestDatabase(s *api.Server) echo.HandlerFunc { defer cancel() if err := s.Manager.ReturnTestDatabase(ctx, hash, id); err != nil { - switch err { - case manager.ErrManagerNotReady: + if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable - case manager.ErrTemplateNotFound: + } else if 
errors.Is(err, manager.ErrTemplateNotFound) { return echo.NewHTTPError(http.StatusNotFound, "template not found") - case manager.ErrTestNotFound: + } else if errors.Is(err, manager.ErrTestNotFound) { return echo.NewHTTPError(http.StatusNotFound, "test database not found") - case pool.ErrTestDBInUse: + } else if errors.Is(err, pool.ErrTestDBInUse) { return echo.NewHTTPError(http.StatusLocked, pool.ErrTestDBInUse.Error()) - default: - return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } + + // default 500 + return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } return c.NoContent(http.StatusNoContent) @@ -164,18 +165,19 @@ func postRecreateTestDatabase(s *api.Server) echo.HandlerFunc { } if err := s.Manager.RecreateTestDatabase(c.Request().Context(), hash, id); err != nil { - switch err { - case manager.ErrManagerNotReady: + + if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable - case manager.ErrTemplateNotFound: + } else if errors.Is(err, manager.ErrTemplateNotFound) { return echo.NewHTTPError(http.StatusNotFound, "template not found") - case manager.ErrTestNotFound: + } else if errors.Is(err, manager.ErrTestNotFound) { return echo.NewHTTPError(http.StatusNotFound, "test database not found") - case pool.ErrTestDBInUse: + } else if errors.Is(err, pool.ErrTestDBInUse) { return echo.NewHTTPError(http.StatusLocked, pool.ErrTestDBInUse.Error()) - default: - return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } + + // default 500 + return echo.NewHTTPError(http.StatusInternalServerError, err.Error()) } return c.NoContent(http.StatusNoContent) diff --git a/internal/config/build_args.go b/internal/config/build_args.go new file mode 100644 index 0000000..7972bc2 --- /dev/null +++ b/internal/config/build_args.go @@ -0,0 +1,18 @@ +package config + +import "fmt" + +// The following vars are automatically injected via -ldflags. +// See Makefile target "make go-build" and make var $(LDFLAGS). 
+// No need to change them here. +// https://www.digitalocean.com/community/tutorials/using-ldflags-to-set-version-information-for-go-applications +var ( + ModuleName = "build.local/misses/ldflags" // e.g. "allaboutapps.dev/aw/go-starter" + Commit = "< 40 chars git commit hash via ldflags >" // e.g. "59cb7684dd0b0f38d68cd7db657cb614feba8f7e" + BuildDate = "1970-01-01T00:00:00+00:00" // e.g. "1970-01-01T00:00:00+00:00" +) + +// GetFormattedBuildArgs returns string representation of buildsargs set via ldflags " @ ()" +func GetFormattedBuildArgs() string { + return fmt.Sprintf("%v @ %v (%v)", ModuleName, Commit, BuildDate) +} diff --git a/pkg/db/database_config_test.go b/pkg/db/database_config_internal_test.go similarity index 100% rename from pkg/db/database_config_test.go rename to pkg/db/database_config_internal_test.go diff --git a/pkg/manager/helpers_test.go b/pkg/manager/helpers_test.go index 27c5048..00baa06 100644 --- a/pkg/manager/helpers_test.go +++ b/pkg/manager/helpers_test.go @@ -51,7 +51,7 @@ func disconnectManager(t *testing.T, m *manager.Manager) { } -func initTemplateDB(ctx context.Context, errs chan<- error, m *manager.Manager) { +func initTemplateDB(_ context.Context, errs chan<- error, m *manager.Manager) { template, err := m.InitializeTemplateDatabase(context.Background(), "hashinghash") if err != nil { @@ -159,7 +159,7 @@ func verifyTestDB(t *testing.T, test db.TestDatabase) { } } -func getTestDB(ctx context.Context, errs chan<- error, m *manager.Manager) { +func getTestDB(_ context.Context, errs chan<- error, m *manager.Manager) { _, err := m.GetTestDatabase(context.Background(), "hashinghash") errs <- err diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 355fd1e..24f6218 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -98,7 +98,7 @@ func (m *Manager) Connect(ctx context.Context) error { return nil } -func (m *Manager) Disconnect(ctx context.Context, ignoreCloseError bool) error { +func (m *Manager) 
Disconnect(_ context.Context, ignoreCloseError bool) error { if m.db == nil { return errors.New("manager is not connected") } @@ -398,25 +398,7 @@ func (m Manager) ResetAllTracking(ctx context.Context) error { // remove all templates to disallow any new test DB creation from existing templates m.templates.RemoveAll(ctx) - if err := m.pool.RemoveAll(ctx, m.dropTestPoolDB); err != nil { - return err - } - - return nil -} - -func (m Manager) dropDatabaseWithID(ctx context.Context, hash string, id int) error { - dbName := m.pool.MakeDBName(hash, id) - exists, err := m.checkDatabaseExists(ctx, dbName) - if err != nil { - return err - } - - if !exists { - return ErrTestNotFound - } - - return m.dropDatabase(ctx, dbName) + return m.pool.RemoveAll(ctx, m.dropTestPoolDB) } func (m Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, error) { diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 4053003..24c56e7 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -9,7 +9,8 @@ import ( "github.com/allaboutapps/integresql/pkg/util" ) -type ManagerConfig struct { +// we explicitly want to access this struct via manager.ManagerConfig, thus we disable revive for the next line +type ManagerConfig struct { //nolint:revive ManagerDatabaseConfig db.DatabaseConfig TemplateDatabaseTemplate string diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 676bb43..a2488cf 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -129,7 +129,7 @@ func TestManagerInitializeTemplateDatabaseTimeout(t *testing.T) { defer cancel() _, err := m.InitializeTemplateDatabase(ctxt, hash) - if err != context.DeadlineExceeded { + if !errors.Is(err, context.DeadlineExceeded) { t.Fatalf("received unexpected error, got %v, want %v", err, context.DeadlineExceeded) } } @@ -173,7 +173,7 @@ func TestManagerInitializeTemplateDatabaseConcurrently(t *testing.T) { if err == nil { success++ } 
else { - if err == manager.ErrTemplateAlreadyInitialized { + if errors.Is(err, manager.ErrTemplateAlreadyInitialized) { failed++ } else { errored++ @@ -384,7 +384,10 @@ func TestManagerFinalizeTemplateAndGetTestDatabaseConcurrently(t *testing.T) { return nil }) - g.Wait() + if err := g.Wait(); err != nil { + t.Fatal(err) + } + first := <-testCh assert.Equal(t, "FINALIZE", first) } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 6b82a00..a7a16c7 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -66,7 +66,7 @@ type HashPool struct { sync.RWMutex wg sync.WaitGroup - tasksChan chan string + tasksChan chan workerTask running bool workerContext context.Context // the ctx all background workers will receive (nil if not yet started) } @@ -89,7 +89,7 @@ func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFu templateDB: templateDB, PoolConfig: cfg, - tasksChan: make(chan string, cfg.MaxPoolSize+1), + tasksChan: make(chan workerTask, cfg.MaxPoolSize+1), running: false, } @@ -194,15 +194,11 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout return testDB.TestDatabase, nil } -func (pool *HashPool) AddTestDatabase(ctx context.Context, templateDB db.Database) error { - return pool.extend(ctx) -} - -func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string, MaxParallelTasks int) { +func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan workerTask, MaxParallelTasks int) { fmt.Printf("pool#%s: workerTaskLoop\n", pool.templateDB.TemplateHash) - handlers := map[string]func(ctx context.Context) error{ + handlers := map[workerTask]func(ctx context.Context) error{ workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), workerTaskAutoCleanDirty: ignoreErrs(pool.autoCleanDirty, context.Canceled), } @@ -224,7 +220,7 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan string } pool.wg.Add(1) - go func(task string) { + go func(task 
workerTask) { defer func() { pool.wg.Done() @@ -248,7 +244,7 @@ func (pool *HashPool) controlLoop(ctx context.Context, cancel context.CancelFunc // ctx, cancel := context.WithCancel(context.Background()) defer cancel() - workerTasksChan := make(chan string, len(pool.tasksChan)) + workerTasksChan := make(chan workerTask, len(pool.tasksChan)) pool.wg.Add(1) go func() { defer pool.wg.Done() @@ -272,10 +268,15 @@ func (pool *HashPool) controlLoop(ctx context.Context, cancel context.CancelFunc } // ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). -func (pool *HashPool) ReturnTestDatabase(ctx context.Context, hash string, id int) error { +func (pool *HashPool) ReturnTestDatabase(ctx context.Context, id int) error { pool.Lock() defer pool.Unlock() + if ctx.Err() != nil { + // client vanished + return ctx.Err() + } + if id < 0 || id >= len(pool.dbs) { return ErrInvalidIndex } @@ -330,7 +331,7 @@ func (pool *HashPool) excludeIDFromDirtyChannel(id int) { } // RecreateTestDatabase prioritizes the test DB to be recreated next via the dirty worker. -func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id int) error { +func (pool *HashPool) RecreateTestDatabase(ctx context.Context, id int) error { pool.RLock() if id < 0 || id >= len(pool.dbs) { @@ -341,10 +342,17 @@ func (pool *HashPool) RecreateTestDatabase(ctx context.Context, hash string, id fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (RecreateTestDatabase %v)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize, id) pool.RUnlock() + if ctx.Err() != nil { + // client vanished + return ctx.Err() + } + // exclude from the normal dirty channel, force recreation in a background worker... 
pool.excludeIDFromDirtyChannel(id) // directly spawn a new worker in the bg (with the same ctx as the typical workers) + // note that this runs unchained, meaning we do not care about errors that may happen via this bg task + //nolint:errcheck go pool.recreateDatabaseGracefully(pool.workerContext, id) return nil diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 1a91a98..3f68770 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -13,7 +13,8 @@ import ( var ErrUnknownHash = errors.New("no database pool exists for this hash") -type PoolConfig struct { +// we explicitly want to access this struct via pool.PoolConfig, thus we disable revive for the next line +type PoolConfig struct { //nolint:revive InitialPoolSize int // Initial number of ready DBs prepared in background MaxPoolSize int // Maximal pool size that won't be exceeded TestDBNamePrefix string // Test-Database prefix: DatabasePrefix_TestDBNamePrefix_HASH_ID @@ -25,7 +26,8 @@ type PoolConfig struct { DisableWorkerAutostart bool // test only flag for starting without background worker task system } -type PoolCollection struct { +// we explicitly want to access this struct via pool.PoolCollection, thus we disable revive for the next line +type PoolCollection struct { //nolint:revive PoolConfig pools map[string]*HashPool // map[hash] @@ -56,7 +58,7 @@ func makeActualRecreateTestDBFunc(templateName string, userRecreateFunc Recreate type recreateTestDBFunc func(context.Context, *existingDB) error // InitHashPool creates a new pool with a given template hash and starts the cleanup workers. 
-func (p *PoolCollection) InitHashPool(ctx context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { +func (p *PoolCollection) InitHashPool(_ context.Context, templateDB db.Database, initDBFunc RecreateDBFunc) { p.mutex.Lock() defer p.mutex.Unlock() @@ -106,20 +108,6 @@ func (p *PoolCollection) GetTestDatabase(ctx context.Context, hash string, timeo return pool.GetTestDatabase(ctx, hash, timeout) } -// AddTestDatabase adds a new test DB to the pool and creates it according to the template. -// The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. -// If the pool size has already reached MAX, ErrPoolFull is returned. -func (p *PoolCollection) AddTestDatabase(ctx context.Context, templateDB db.Database) error { - hash := templateDB.TemplateHash - - pool, err := p.getPool(ctx, hash) - if err != nil { - return err - } - - return pool.AddTestDatabase(ctx, templateDB) -} - // ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). func (p *PoolCollection) ReturnTestDatabase(ctx context.Context, hash string, id int) error { pool, err := p.getPool(ctx, hash) @@ -127,7 +115,7 @@ func (p *PoolCollection) ReturnTestDatabase(ctx context.Context, hash string, id return err } - return pool.ReturnTestDatabase(ctx, hash, id) + return pool.ReturnTestDatabase(ctx, id) } // RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. @@ -137,7 +125,7 @@ func (p *PoolCollection) RecreateTestDatabase(ctx context.Context, hash string, return err } - return pool.RecreateTestDatabase(ctx, hash, id) + return pool.RecreateTestDatabase(ctx, id) } // RemoveAllWithHash removes a pool with a given template hash. @@ -218,3 +206,18 @@ func (p *PoolCollection) getPoolLockCollection(ctx context.Context, hash string) return pool, unlock, err } + +// extend is only used for internal testing! +// it adds a new test DB to the pool and creates it according to the template. 
+// The new test DB is marked as 'Ready' and can be picked up with GetTestDatabase. +// If the pool size has already reached MAX, ErrPoolFull is returned. +func (p *PoolCollection) extend(ctx context.Context, templateDB db.Database) error { + hash := templateDB.TemplateHash + + pool, err := p.getPool(ctx, hash) + if err != nil { + return err + } + + return pool.extend(ctx) +} diff --git a/pkg/pool/pool_collection_test.go b/pkg/pool/pool_collection_internal_test.go similarity index 86% rename from pkg/pool/pool_collection_test.go rename to pkg/pool/pool_collection_internal_test.go index 90c0c05..cb396f8 100644 --- a/pkg/pool/pool_collection_test.go +++ b/pkg/pool/pool_collection_internal_test.go @@ -1,4 +1,4 @@ -package pool_test +package pool import ( "context" @@ -7,7 +7,6 @@ import ( "time" "github.com/allaboutapps/integresql/pkg/db" - "github.com/allaboutapps/integresql/pkg/pool" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -16,13 +15,13 @@ func TestPoolAddGet(t *testing.T) { t.Parallel() ctx := context.Background() - cfg := pool.PoolConfig{ + cfg := PoolConfig{ MaxPoolSize: 2, MaxParallelTasks: 4, TestDBNamePrefix: "prefix_", DisableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! 
} - p := pool.NewPoolCollection(cfg) + p := NewPoolCollection(cfg) hash1 := "h1" hash2 := "h2" @@ -43,10 +42,10 @@ func TestPoolAddGet(t *testing.T) { // get from empty (just initialized) _, err := p.GetTestDatabase(ctx, hash1, 0) - assert.Error(t, err, pool.ErrTimeout) + assert.Error(t, err, ErrTimeout) // add a new one - assert.NoError(t, p.AddTestDatabase(ctx, templateDB)) + assert.NoError(t, p.extend(ctx, templateDB)) // get it testDB, err := p.GetTestDatabase(ctx, hash1, 1*time.Second) assert.NoError(t, err) @@ -57,13 +56,13 @@ func TestPoolAddGet(t *testing.T) { templateDB2 := templateDB templateDB2.TemplateHash = hash2 p.InitHashPool(ctx, templateDB2, initFunc) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) - assert.ErrorIs(t, p.AddTestDatabase(ctx, templateDB2), pool.ErrPoolFull) + assert.NoError(t, p.extend(ctx, templateDB2)) + assert.NoError(t, p.extend(ctx, templateDB2)) + assert.ErrorIs(t, p.extend(ctx, templateDB2), ErrPoolFull) // get from empty h1 _, err = p.GetTestDatabase(ctx, hash1, 100*time.Millisecond) - assert.ErrorIs(t, err, pool.ErrTimeout) + assert.ErrorIs(t, err, ErrTimeout) // get from h2 testDB1, err := p.GetTestDatabase(ctx, hash2, 1*time.Second) @@ -93,13 +92,13 @@ func TestPoolAddGetConcurrent(t *testing.T) { } maxPoolSize := 15 - cfg := pool.PoolConfig{ + cfg := PoolConfig{ MaxPoolSize: maxPoolSize, InitialPoolSize: maxPoolSize, MaxParallelTasks: 4, TestDBNamePrefix: "", } - p := pool.NewPoolCollection(cfg) + p := NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) var wg sync.WaitGroup @@ -148,12 +147,12 @@ func TestPoolAddGetReturnConcurrent(t *testing.T) { return nil } - cfg := pool.PoolConfig{ + cfg := PoolConfig{ MaxPoolSize: 40, MaxParallelTasks: 4, TestDBNamePrefix: "", } - p := pool.NewPoolCollection(cfg) + p := NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) p.InitHashPool(ctx, templateDB1, initFunc) @@ -163,8 +162,8 @@ func 
TestPoolAddGetReturnConcurrent(t *testing.T) { // add DBs sequentially for i := 0; i < cfg.MaxPoolSize/4; i++ { - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) + assert.NoError(t, p.extend(ctx, templateDB1)) + assert.NoError(t, p.extend(ctx, templateDB2)) } // stop the workers to prevent auto cleaning in background @@ -211,11 +210,11 @@ func TestPoolRemoveAll(t *testing.T) { return nil } - cfg := pool.PoolConfig{ + cfg := PoolConfig{ MaxPoolSize: 6, MaxParallelTasks: 4, } - p := pool.NewPoolCollection(cfg) + p := NewPoolCollection(cfg) t.Cleanup(func() { p.Stop() }) p.InitHashPool(ctx, templateDB1, initFunc) @@ -223,8 +222,8 @@ func TestPoolRemoveAll(t *testing.T) { // add DBs sequentially for i := 0; i < cfg.MaxPoolSize; i++ { - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB2)) + assert.NoError(t, p.extend(ctx, templateDB1)) + assert.NoError(t, p.extend(ctx, templateDB2)) } // remove all @@ -232,13 +231,13 @@ func TestPoolRemoveAll(t *testing.T) { // try to get _, err := p.GetTestDatabase(ctx, hash1, 0) - assert.Error(t, err, pool.ErrTimeout) + assert.Error(t, err, ErrTimeout) _, err = p.GetTestDatabase(ctx, hash2, 0) - assert.Error(t, err, pool.ErrTimeout) + assert.Error(t, err, ErrTimeout) // start using pool again p.InitHashPool(ctx, templateDB1, initFunc) - assert.NoError(t, p.AddTestDatabase(ctx, templateDB1)) + assert.NoError(t, p.extend(ctx, templateDB1)) testDB, err := p.GetTestDatabase(ctx, hash1, 1*time.Second) assert.NoError(t, err) assert.Equal(t, 0, testDB.ID) @@ -262,13 +261,13 @@ func TestPoolReuseDirty(t *testing.T) { } maxPoolSize := 40 - cfg := pool.PoolConfig{ + cfg := PoolConfig{ MaxPoolSize: maxPoolSize, InitialPoolSize: maxPoolSize, MaxParallelTasks: 1, TestDBNamePrefix: "test_", } - p := pool.NewPoolCollection(cfg) + p := NewPoolCollection(cfg) p.InitHashPool(ctx, templateDB1, initFunc) t.Cleanup(func() { p.Stop() 
}) @@ -321,23 +320,23 @@ func TestPoolReturnTestDatabase(t *testing.T) { return nil } - cfg := pool.PoolConfig{ + cfg := PoolConfig{ MaxPoolSize: 10, MaxParallelTasks: 3, DisableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! } - p := pool.NewPoolCollection(cfg) + p := NewPoolCollection(cfg) p.InitHashPool(ctx, templateDB1, initFunc) // add just one test DB - require.NoError(t, p.AddTestDatabase(ctx, templateDB1)) + require.NoError(t, p.extend(ctx, templateDB1)) testDB1, err := p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) assert.NoError(t, err) // assert that workers are stopped and no new DB showed up _, err = p.GetTestDatabase(ctx, templateDB1.TemplateHash, time.Millisecond) - assert.ErrorIs(t, err, pool.ErrTimeout) + assert.ErrorIs(t, err, ErrTimeout) // return and get the same one assert.NoError(t, p.ReturnTestDatabase(ctx, hash1, testDB1.ID)) diff --git a/pkg/templates/template.go b/pkg/templates/template.go index 1911234..79a3e97 100644 --- a/pkg/templates/template.go +++ b/pkg/templates/template.go @@ -41,7 +41,7 @@ func NewTemplate(hash string, config TemplateConfig) *Template { return t } -func (t *Template) GetConfig(ctx context.Context) TemplateConfig { +func (t *Template) GetConfig(_ context.Context) TemplateConfig { t.mutex.RLock() defer t.mutex.RUnlock() @@ -49,7 +49,7 @@ func (t *Template) GetConfig(ctx context.Context) TemplateConfig { } // GetState locks the template and checks its state. -func (t *Template) GetState(ctx context.Context) TemplateState { +func (t *Template) GetState(_ context.Context) TemplateState { t.mutex.RLock() defer t.mutex.RUnlock() @@ -94,18 +94,18 @@ func (t *Template) WaitUntilFinalized(ctx context.Context, timeout time.Duration // GetStateWithLock gets the current state leaving the template locked. // REMEMBER to unlock it when you no longer need it locked. 
-func (t *Template) GetStateWithLock(ctx context.Context) (TemplateState, lockedTemplate) { +func (t *Template) GetStateWithLock(_ context.Context) (TemplateState, LockedTemplate) { t.mutex.Lock() - return t.state, lockedTemplate{t: t} + return t.state, LockedTemplate{t: t} } -type lockedTemplate struct { +type LockedTemplate struct { t *Template } // Unlock releases the locked template. -func (l *lockedTemplate) Unlock() { +func (l *LockedTemplate) Unlock() { if l.t != nil { l.t.mutex.Unlock() l.t = nil @@ -113,7 +113,7 @@ func (l *lockedTemplate) Unlock() { } // SetState sets a new state of the locked template (without acquiring the lock again). -func (l lockedTemplate) SetState(ctx context.Context, newState TemplateState) { +func (l LockedTemplate) SetState(_ context.Context, newState TemplateState) { if l.t.state == newState { return } diff --git a/pkg/templates/template_collection.go b/pkg/templates/template_collection.go index 1d4f31f..4769a91 100644 --- a/pkg/templates/template_collection.go +++ b/pkg/templates/template_collection.go @@ -81,7 +81,7 @@ func (tc *Collection) Get(ctx context.Context, hash string) (template *Template, } // RemoveUnsafe removes the template and can be called ONLY IF THE COLLECTION IS LOCKED. 
-func (tc *Collection) RemoveUnsafe(ctx context.Context, hash string) { +func (tc *Collection) RemoveUnsafe(_ context.Context, hash string) { delete(tc.templates, hash) } diff --git a/pkg/templates/template_test.go b/pkg/templates/template_test.go index 3415383..128c5dd 100644 --- a/pkg/templates/template_test.go +++ b/pkg/templates/template_test.go @@ -2,7 +2,6 @@ package templates_test import ( "context" - "errors" "fmt" "sync" "testing" @@ -48,7 +47,7 @@ func TestForReady(t *testing.T) { timeout := 1 * time.Second state := t1.WaitUntilFinalized(ctx, timeout) if state != templates.TemplateStateFinalized { - errsChan <- errors.New(fmt.Sprintf("expected state %v (finalized), but is %v", templates.TemplateStateFinalized, state)) + errsChan <- fmt.Errorf("expected state %v (finalized), but is %v", templates.TemplateStateFinalized, state) } }() } @@ -61,7 +60,7 @@ func TestForReady(t *testing.T) { timeout := 30 * time.Millisecond state := t1.WaitUntilFinalized(ctx, timeout) if state != templates.TemplateStateInit { - errsChan <- errors.New(fmt.Sprintf("expected state %v (init), but is %v", templates.TemplateStateInit, state)) + errsChan <- fmt.Errorf("expected state %v (init), but is %v", templates.TemplateStateInit, state) } }() } diff --git a/pkg/util/retry.go b/pkg/util/retry.go index 1f629c7..ac5fb18 100644 --- a/pkg/util/retry.go +++ b/pkg/util/retry.go @@ -17,5 +17,5 @@ func Retry(attempts int, sleep time.Duration, f func() error) error { time.Sleep(sleep) } - return fmt.Errorf("failing after %d attempts, lat error: %v", attempts, err) + return fmt.Errorf("failing after %d attempts, lat error: %w", attempts, err) } diff --git a/tests/testclient/client.go b/tests/testclient/client.go index 686c204..503a241 100644 --- a/tests/testclient/client.go +++ b/tests/testclient/client.go @@ -8,6 +8,7 @@ import ( "context" "database/sql" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -16,6 +17,8 @@ import ( "github.com/allaboutapps/integresql/pkg/manager" 
"github.com/allaboutapps/integresql/pkg/util" + + // Import postgres driver for database/sql package _ "github.com/lib/pq" ) @@ -124,7 +127,7 @@ func (c *Client) SetupTemplate(ctx context.Context, hash string, init func(conn } return c.FinalizeTemplate(ctx, hash) - } else if err == manager.ErrTemplateAlreadyInitialized { + } else if errors.Is(err, manager.ErrTemplateAlreadyInitialized) { return nil } else { return err From 1e64a0c859408333c0ef049938e352ab599c2119 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 4 Sep 2023 17:44:07 +0200 Subject: [PATCH 142/160] private disableWorkerAutostart --- pkg/pool/pool_collection.go | 4 ++-- pkg/pool/pool_collection_internal_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index 3f68770..d4ccd46 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -23,7 +23,7 @@ type PoolConfig struct { //nolint:revive TestDatabaseRetryRecreateSleepMax time.Duration // ... the maximum possible sleep time between retries (e.g. 3 seconds) is reached. TestDatabaseMinimalLifetime time.Duration // After a testdatabase transitions from ready to dirty, always block auto-recreation for this duration (except manual recreate). 
- DisableWorkerAutostart bool // test only flag for starting without background worker task system + disableWorkerAutostart bool // test only private flag for starting without background worker task system } // we explicitly want to access this struct via pool.PoolCollection, thus we disable revive for the next line @@ -67,7 +67,7 @@ func (p *PoolCollection) InitHashPool(_ context.Context, templateDB db.Database, // Create a new HashPool pool := NewHashPool(cfg, templateDB, initDBFunc) - if !cfg.DisableWorkerAutostart { + if !cfg.disableWorkerAutostart { pool.Start() } diff --git a/pkg/pool/pool_collection_internal_test.go b/pkg/pool/pool_collection_internal_test.go index cb396f8..a843d0c 100644 --- a/pkg/pool/pool_collection_internal_test.go +++ b/pkg/pool/pool_collection_internal_test.go @@ -19,7 +19,7 @@ func TestPoolAddGet(t *testing.T) { MaxPoolSize: 2, MaxParallelTasks: 4, TestDBNamePrefix: "prefix_", - DisableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! + disableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! } p := NewPoolCollection(cfg) @@ -323,7 +323,7 @@ func TestPoolReturnTestDatabase(t *testing.T) { cfg := PoolConfig{ MaxPoolSize: 10, MaxParallelTasks: 3, - DisableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! + disableWorkerAutostart: true, // no extend / cleanDirty tasks should run automatically! 
} p := NewPoolCollection(cfg) From 34fdd8e7d4a791322e227e16d08eee433700dbc5 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 5 Sep 2023 15:35:01 +0200 Subject: [PATCH 143/160] intro zerolog and add proper log statements in pool and manager, into typical go-starter logging middleware and additional env vars for controlling them --- cmd/server/main.go | 28 ++- go.mod | 1 + go.sum | 7 + internal/api/middleware/logger.go | 307 ++++++++++++++++++++++++++++++ internal/api/server_config.go | 48 ++++- internal/router/echo_logger.go | 13 ++ internal/router/router.go | 53 +++++- internal/router/router_test.go | 35 ++++ internal/test/helper_request.go | 132 +++++++++++++ internal/test/test_server.go | 62 ++++++ pkg/manager/manager.go | 122 +++++++++--- pkg/manager/manager_config.go | 4 +- pkg/manager/manager_test.go | 4 +- pkg/pool/pool.go | 181 ++++++++++++------ pkg/pool/pool_collection.go | 2 +- pkg/util/context.go | 58 ++++++ pkg/util/log.go | 42 ++++ pkg/util/log_test.go | 20 ++ 18 files changed, 1022 insertions(+), 97 deletions(-) create mode 100644 internal/api/middleware/logger.go create mode 100644 internal/router/echo_logger.go create mode 100644 internal/router/router_test.go create mode 100644 internal/test/helper_request.go create mode 100644 internal/test/test_server.go create mode 100644 pkg/util/context.go create mode 100644 pkg/util/log.go create mode 100644 pkg/util/log_test.go diff --git a/cmd/server/main.go b/cmd/server/main.go index 3399fd5..9ab58f2 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -3,8 +3,6 @@ package main import ( "context" "errors" - "fmt" - "log" "net/http" "os" "os/signal" @@ -14,23 +12,39 @@ import ( "github.com/allaboutapps/integresql/internal/api" "github.com/allaboutapps/integresql/internal/config" "github.com/allaboutapps/integresql/internal/router" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" ) func main() { - fmt.Println(config.GetFormattedBuildArgs()) + cfg := 
api.DefaultServerConfigFromEnv() - s := api.DefaultServerFromEnv() + zerolog.TimeFieldFormat = time.RFC3339Nano + zerolog.SetGlobalLevel(cfg.Logger.Level) + if cfg.Logger.PrettyPrintConsole { + log.Logger = log.Output(zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) { + w.TimeFormat = "15:04:05" + })) + } + + log.Info().Str("version", config.GetFormattedBuildArgs()).Msg("starting...") + + s := api.NewServer(cfg) if err := s.InitManager(context.Background()); err != nil { - log.Fatalf("Failed to initialize manager: %v", err) + log.Fatal().Err(err).Msg("Failed to initialize manager") } router.Init(s) go func() { if err := s.Start(); err != nil { - log.Fatalf("Failed to start server: %v", err) + if errors.Is(err, http.ErrServerClosed) { + log.Info().Msg("Server closed") + } else { + log.Fatal().Err(err).Msg("Failed to start server") + } } }() @@ -42,6 +56,6 @@ func main() { defer cancel() if err := s.Shutdown(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { - log.Fatalf("Failed to gracefully shut down server: %v", err) + log.Fatal().Err(err).Msg("Failed to gracefully shut down server") } } diff --git a/go.mod b/go.mod index 4f748c6..b4e6479 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/google/uuid v1.3.0 github.com/labstack/echo/v4 v4.10.2 github.com/lib/pq v1.10.9 + github.com/rs/zerolog v1.28.0 github.com/stretchr/testify v1.8.4 golang.org/x/sync v0.3.0 ) diff --git a/go.sum b/go.sum index 5a270e2..3701fcb 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,8 @@ +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= @@ -17,14 +19,19 @@ github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= +github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= diff --git a/internal/api/middleware/logger.go b/internal/api/middleware/logger.go new file mode 100644 index 0000000..7135b5b --- /dev/null +++ b/internal/api/middleware/logger.go @@ -0,0 +1,307 @@ +package middleware + +import ( + "bufio" + "bytes" + "context" + "io" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/allaboutapps/integresql/pkg/util" + "github.com/labstack/echo/v4" + "github.com/labstack/echo/v4/middleware" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +// RequestBodyLogSkipper defines a function to skip logging certain request bodies. +// Returning true skips logging the payload of the request. +type RequestBodyLogSkipper func(req *http.Request) bool + +// DefaultRequestBodyLogSkipper returns true for all requests with Content-Type +// application/x-www-form-urlencoded or multipart/form-data as those might contain +// binary or URL-encoded file uploads unfit for logging purposes. +func DefaultRequestBodyLogSkipper(req *http.Request) bool { + contentType := req.Header.Get(echo.HeaderContentType) + switch { + case strings.HasPrefix(contentType, echo.MIMEApplicationForm), + strings.HasPrefix(contentType, echo.MIMEMultipartForm): + return true + default: + return false + } +} + +// ResponseBodyLogSkipper defines a function to skip logging certain response bodies. +// Returning true skips logging the payload of the response. +type ResponseBodyLogSkipper func(req *http.Request, res *echo.Response) bool + +// DefaultResponseBodyLogSkipper returns false for all responses with Content-Type +// application/json, preventing logging for all other types of payloads as those +// might contain binary or URL-encoded data unfit for logging purposes. 
+func DefaultResponseBodyLogSkipper(_ *http.Request, res *echo.Response) bool { + contentType := res.Header().Get(echo.HeaderContentType) + switch { + case strings.HasPrefix(contentType, echo.MIMEApplicationJSON): + return false + default: + return true + } +} + +// BodyLogReplacer defines a function to replace certain parts of a body before logging it, +// mainly used to strip sensitive information from a request or response payload. +// The []byte returned should contain a sanitized payload ready for logging. +type BodyLogReplacer func(body []byte) []byte + +// DefaultBodyLogReplacer returns the body received without any modifications. +func DefaultBodyLogReplacer(body []byte) []byte { + return body +} + +// HeaderLogReplacer defines a function to replace certain parts of a header before logging it, +// mainly used to strip sensitive information from a request or response header. +// The http.Header returned should be a sanitized copy of the original header as not to modify +// the request or response while logging. +type HeaderLogReplacer func(header http.Header) http.Header + +// DefaultHeaderLogReplacer replaces all Authorization, X-CSRF-Token and Proxy-Authorization +// header entries with a redacted string, indicating their presence without revealing actual, +// potentially sensitive values in the logs. +func DefaultHeaderLogReplacer(header http.Header) http.Header { + sanitizedHeader := http.Header{} + + for k, vv := range header { + shouldRedact := strings.EqualFold(k, echo.HeaderAuthorization) || + strings.EqualFold(k, echo.HeaderXCSRFToken) || + strings.EqualFold(k, "Proxy-Authorization") + + for _, v := range vv { + if shouldRedact { + sanitizedHeader.Add(k, "*****REDACTED*****") + } else { + sanitizedHeader.Add(k, v) + } + } + } + + return sanitizedHeader +} + +// QueryLogReplacer defines a function to replace certain parts of a URL query before logging it, +// mainly used to strip sensitive information from a request query. 
+// The url.Values returned should be a sanitized copy of the original query as not to modify the +// request while logging. +type QueryLogReplacer func(query url.Values) url.Values + +// DefaultQueryLogReplacer returns the query received without any modifications. +func DefaultQueryLogReplacer(query url.Values) url.Values { + return query +} + +var ( + DefaultLoggerConfig = LoggerConfig{ + Skipper: middleware.DefaultSkipper, + Level: zerolog.DebugLevel, + LogRequestBody: false, + LogRequestHeader: false, + LogRequestQuery: false, + RequestBodyLogSkipper: DefaultRequestBodyLogSkipper, + RequestBodyLogReplacer: DefaultBodyLogReplacer, + RequestHeaderLogReplacer: DefaultHeaderLogReplacer, + RequestQueryLogReplacer: DefaultQueryLogReplacer, + LogResponseBody: false, + LogResponseHeader: false, + ResponseBodyLogSkipper: DefaultResponseBodyLogSkipper, + ResponseBodyLogReplacer: DefaultBodyLogReplacer, + } +) + +type LoggerConfig struct { + Skipper middleware.Skipper + Level zerolog.Level + LogRequestBody bool + LogRequestHeader bool + LogRequestQuery bool + RequestBodyLogSkipper RequestBodyLogSkipper + RequestBodyLogReplacer BodyLogReplacer + RequestHeaderLogReplacer HeaderLogReplacer + RequestQueryLogReplacer QueryLogReplacer + LogResponseBody bool + LogResponseHeader bool + ResponseBodyLogSkipper ResponseBodyLogSkipper + ResponseBodyLogReplacer BodyLogReplacer + ResponseHeaderLogReplacer HeaderLogReplacer +} + +// Logger with default logger output and configuration +func Logger() echo.MiddlewareFunc { + return LoggerWithConfig(DefaultLoggerConfig, nil) +} + +// LoggerWithConfig returns a new MiddlewareFunc which creates a logger with the desired configuration. +// If output is set to nil, the default output is used. If more output params are provided, the first is being used. 
+func LoggerWithConfig(config LoggerConfig, output ...io.Writer) echo.MiddlewareFunc { + if config.Skipper == nil { + config.Skipper = DefaultLoggerConfig.Skipper + } + if config.RequestBodyLogSkipper == nil { + config.RequestBodyLogSkipper = DefaultRequestBodyLogSkipper + } + if config.RequestBodyLogReplacer == nil { + config.RequestBodyLogReplacer = DefaultBodyLogReplacer + } + if config.RequestHeaderLogReplacer == nil { + config.RequestHeaderLogReplacer = DefaultHeaderLogReplacer + } + if config.RequestQueryLogReplacer == nil { + config.RequestQueryLogReplacer = DefaultQueryLogReplacer + } + if config.ResponseBodyLogSkipper == nil { + config.ResponseBodyLogSkipper = DefaultResponseBodyLogSkipper + } + if config.ResponseBodyLogReplacer == nil { + config.ResponseBodyLogReplacer = DefaultBodyLogReplacer + } + if config.ResponseHeaderLogReplacer == nil { + config.ResponseHeaderLogReplacer = DefaultHeaderLogReplacer + } + + return func(next echo.HandlerFunc) echo.HandlerFunc { + return func(c echo.Context) error { + if config.Skipper(c) { + return next(c) + } + + req := c.Request() + res := c.Response() + + id := req.Header.Get(echo.HeaderXRequestID) + if len(id) == 0 { + id = res.Header().Get(echo.HeaderXRequestID) + } + + in := req.Header.Get(echo.HeaderContentLength) + if len(in) == 0 { + in = "0" + } + + l := log.With(). + Dict("req", zerolog.Dict(). + Str("id", id). + Str("host", req.Host). + Str("method", req.Method). + Str("url", req.URL.String()). 
+ Str("bytes_in", in), + ).Logger() + + if len(output) > 0 { + l = l.Output(output[0]) + } + + le := l.WithLevel(config.Level) + req = req.WithContext(l.WithContext(context.WithValue(req.Context(), util.CTXKeyRequestID, id))) + + if config.LogRequestBody && !config.RequestBodyLogSkipper(req) { + var reqBody []byte + var err error + if req.Body != nil { + reqBody, err = io.ReadAll(req.Body) + if err != nil { + l.Error().Err(err).Msg("Failed to read body while logging request") + return err + } + + req.Body = io.NopCloser(bytes.NewBuffer(reqBody)) + } + + le = le.Bytes("req_body", config.RequestBodyLogReplacer(reqBody)) + } + if config.LogRequestHeader { + header := zerolog.Dict() + for k, v := range config.RequestHeaderLogReplacer(req.Header) { + header.Strs(k, v) + } + + le = le.Dict("req_header", header) + } + if config.LogRequestQuery { + query := zerolog.Dict() + for k, v := range req.URL.Query() { + query.Strs(k, v) + } + + le = le.Dict("req_query", query) + } + + le.Msg("Request received") + + c.SetRequest(req) + + var resBody bytes.Buffer + if config.LogResponseBody { + mw := io.MultiWriter(res.Writer, &resBody) + writer := &bodyDumpResponseWriter{Writer: mw, ResponseWriter: res.Writer} + res.Writer = writer + } + + start := time.Now() + err := next(c) + if err != nil { + c.Error(err) + } + stop := time.Now() + + // Retrieve logger from context again since other middlewares might have enhanced it + ll := util.LogFromEchoContext(c) + lle := ll.WithLevel(config.Level). + Dict("res", zerolog.Dict(). + Int("status", res.Status). + Int64("bytes_out", res.Size). + TimeDiff("duration_ms", stop, start). 
+ Err(err), + ) + + if config.LogResponseBody && !config.ResponseBodyLogSkipper(req, res) { + lle = lle.Bytes("res_body", config.ResponseBodyLogReplacer(resBody.Bytes())) + } + if config.LogResponseHeader { + header := zerolog.Dict() + for k, v := range config.ResponseHeaderLogReplacer(res.Header()) { + header.Strs(k, v) + } + + lle = lle.Dict("res_header", header) + } + + lle.Msg("Response sent") + + return nil + } + } +} + +type bodyDumpResponseWriter struct { + io.Writer + http.ResponseWriter +} + +func (w *bodyDumpResponseWriter) WriteHeader(code int) { + w.ResponseWriter.WriteHeader(code) +} + +func (w *bodyDumpResponseWriter) Write(b []byte) (int, error) { + return w.Writer.Write(b) +} + +func (w *bodyDumpResponseWriter) Flush() { + w.ResponseWriter.(http.Flusher).Flush() +} + +func (w *bodyDumpResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return w.ResponseWriter.(http.Hijacker).Hijack() +} diff --git a/internal/api/server_config.go b/internal/api/server_config.go index 41194f9..316cace 100644 --- a/internal/api/server_config.go +++ b/internal/api/server_config.go @@ -1,17 +1,61 @@ package api -import "github.com/allaboutapps/integresql/pkg/util" +import ( + "github.com/allaboutapps/integresql/pkg/util" + "github.com/rs/zerolog" +) type ServerConfig struct { Address string Port int DebugEndpoints bool + Logger LoggerConfig + Echo EchoConfig +} + +type EchoConfig struct { + Debug bool + ListenAddress string + EnableCORSMiddleware bool + EnableLoggerMiddleware bool + EnableRecoverMiddleware bool + EnableRequestIDMiddleware bool + EnableTrailingSlashMiddleware bool +} + +type LoggerConfig struct { + Level zerolog.Level + RequestLevel zerolog.Level + LogRequestBody bool + LogRequestHeader bool + LogRequestQuery bool + LogResponseBody bool + LogResponseHeader bool + PrettyPrintConsole bool } func DefaultServerConfigFromEnv() ServerConfig { return ServerConfig{ Address: util.GetEnv("INTEGRESQL_ADDRESS", ""), Port: 
util.GetEnvAsInt("INTEGRESQL_PORT", 5000), - DebugEndpoints: util.GetEnvAsBool("INTEGRESQL_DEBUG_ENDPOINTS", true), + DebugEndpoints: util.GetEnvAsBool("INTEGRESQL_DEBUG_ENDPOINTS", true), // https://golang.org/pkg/net/http/pprof/ + Echo: EchoConfig{ + Debug: util.GetEnvAsBool("INTEGRESQL_ECHO_DEBUG", false), + EnableCORSMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_CORS_MIDDLEWARE", true), + EnableLoggerMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_LOGGER_MIDDLEWARE", true), + EnableRecoverMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_RECOVER_MIDDLEWARE", true), + EnableRequestIDMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_REQUEST_ID_MIDDLEWARE", true), + EnableTrailingSlashMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_TRAILING_SLASH_MIDDLEWARE", true), + }, + Logger: LoggerConfig{ + Level: util.LogLevelFromString(util.GetEnv("INTEGRESQL_LOGGER_LEVEL", zerolog.InfoLevel.String())), + RequestLevel: util.LogLevelFromString(util.GetEnv("INTEGRESQL_LOGGER_REQUEST_LEVEL", zerolog.DebugLevel.String())), + LogRequestBody: util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_BODY", false), + LogRequestHeader: util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_HEADER", false), + LogRequestQuery: util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_QUERY", false), + LogResponseBody: util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_RESPONSE_BODY", false), + LogResponseHeader: util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_RESPONSE_HEADER", false), + PrettyPrintConsole: util.GetEnvAsBool("INTEGRESQL_LOGGER_PRETTY_PRINT_CONSOLE", false), + }, } } diff --git a/internal/router/echo_logger.go b/internal/router/echo_logger.go new file mode 100644 index 0000000..0931855 --- /dev/null +++ b/internal/router/echo_logger.go @@ -0,0 +1,13 @@ +package router + +import "github.com/rs/zerolog" + +type echoLogger struct { + level zerolog.Level + log zerolog.Logger +} + +func (l *echoLogger) Write(p []byte) (n int, err error) { + l.log.WithLevel(l.level).Msgf("%s", p) + 
return len(p), nil +} diff --git a/internal/router/router.go b/internal/router/router.go index 35d5505..4fac61d 100644 --- a/internal/router/router.go +++ b/internal/router/router.go @@ -5,28 +5,67 @@ import ( "github.com/allaboutapps/integresql/internal/api" "github.com/allaboutapps/integresql/internal/api/admin" + "github.com/allaboutapps/integresql/internal/api/middleware" "github.com/allaboutapps/integresql/internal/api/templates" "github.com/labstack/echo/v4" echoMiddleware "github.com/labstack/echo/v4/middleware" + "github.com/rs/zerolog/log" ) func Init(s *api.Server) { s.Echo = echo.New() - s.Echo.Debug = false + s.Echo.Debug = s.Config.Echo.Debug s.Echo.HideBanner = true + s.Echo.Logger.SetOutput(&echoLogger{level: s.Config.Logger.RequestLevel, log: log.With().Str("component", "echo").Logger()}) - s.Echo.Pre(echoMiddleware.RemoveTrailingSlash()) + // --- + // General middleware + if s.Config.Echo.EnableTrailingSlashMiddleware { + s.Echo.Pre(echoMiddleware.RemoveTrailingSlash()) + } else { + log.Warn().Msg("Disabling trailing slash middleware due to environment config") + } - s.Echo.Use(echoMiddleware.Recover()) - s.Echo.Use(echoMiddleware.RequestID()) - s.Echo.Use(echoMiddleware.Logger()) + if s.Config.Echo.EnableRecoverMiddleware { + s.Echo.Use(echoMiddleware.Recover()) + } else { + log.Warn().Msg("Disabling recover middleware due to environment config") + } - admin.InitRoutes(s) - templates.InitRoutes(s) + if s.Config.Echo.EnableRequestIDMiddleware { + s.Echo.Use(echoMiddleware.RequestID()) + } else { + log.Warn().Msg("Disabling request ID middleware due to environment config") + } + + if s.Config.Echo.EnableLoggerMiddleware { + s.Echo.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{ + Level: s.Config.Logger.RequestLevel, + LogRequestBody: s.Config.Logger.LogRequestBody, + LogRequestHeader: s.Config.Logger.LogRequestHeader, + LogRequestQuery: s.Config.Logger.LogRequestQuery, + LogResponseBody: 
s.Config.Logger.LogResponseBody, + LogResponseHeader: s.Config.Logger.LogResponseHeader, + RequestBodyLogSkipper: func(req *http.Request) bool { + return middleware.DefaultRequestBodyLogSkipper(req) + }, + ResponseBodyLogSkipper: func(req *http.Request, res *echo.Response) bool { + return middleware.DefaultResponseBodyLogSkipper(req, res) + }, + Skipper: func(c echo.Context) bool { + return false + }, + })) + } else { + log.Warn().Msg("Disabling logger middleware due to environment config") + } // enable debug endpoints only if requested if s.Config.DebugEndpoints { s.Echo.GET("/debug/*", echo.WrapHandler(http.DefaultServeMux)) } + + admin.InitRoutes(s) + templates.InitRoutes(s) } diff --git a/internal/router/router_test.go b/internal/router/router_test.go new file mode 100644 index 0000000..c393096 --- /dev/null +++ b/internal/router/router_test.go @@ -0,0 +1,35 @@ +package router_test + +import ( + "testing" + + "github.com/allaboutapps/integresql/internal/api" + "github.com/allaboutapps/integresql/internal/test" + "github.com/stretchr/testify/require" +) + +func TestPprofEnabledNoAuth(t *testing.T) { + config := api.DefaultServerConfigFromEnv() + + // these are typically our default values, however we force set them here to ensure those are set while test execution. 
+ config.DebugEndpoints = true + + test.WithTestServerConfigurable(t, config, func(s *api.Server) { + res := test.PerformRequest(t, s, "GET", "/debug/pprof/heap/", nil, nil) + require.Equal(t, 200, res.Result().StatusCode) + + // index + res = test.PerformRequest(t, s, "GET", "/debug/pprof/", nil, nil) + require.Equal(t, 301, res.Result().StatusCode) + }) +} + +func TestPprofDisabled(t *testing.T) { + config := api.DefaultServerConfigFromEnv() + config.DebugEndpoints = false + + test.WithTestServerConfigurable(t, config, func(s *api.Server) { + res := test.PerformRequest(t, s, "GET", "/debug/pprof/heap", nil, nil) + require.Equal(t, 404, res.Result().StatusCode) + }) +} diff --git a/internal/test/helper_request.go b/internal/test/helper_request.go new file mode 100644 index 0000000..7693358 --- /dev/null +++ b/internal/test/helper_request.go @@ -0,0 +1,132 @@ +package test + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/allaboutapps/integresql/internal/api" + "github.com/labstack/echo/v4" +) + +type GenericPayload map[string]interface{} +type GenericArrayPayload []interface{} + +func (g GenericPayload) Reader(t *testing.T) *bytes.Reader { + t.Helper() + + b, err := json.Marshal(g) + if err != nil { + t.Fatalf("failed to serialize payload: %v", err) + } + + return bytes.NewReader(b) +} + +func (g GenericArrayPayload) Reader(t *testing.T) *bytes.Reader { + t.Helper() + + b, err := json.Marshal(g) + if err != nil { + t.Fatalf("failed to serialize payload: %v", err) + } + + return bytes.NewReader(b) +} + +func PerformRequestWithParams(t *testing.T, s *api.Server, method string, path string, body GenericPayload, headers http.Header, queryParams map[string]string) *httptest.ResponseRecorder { + t.Helper() + + if body == nil { + return PerformRequestWithRawBody(t, s, method, path, nil, headers, queryParams) + } + + return PerformRequestWithRawBody(t, s, method, path, body.Reader(t), headers, 
queryParams) +} + +func PerformRequestWithArrayAndParams(t *testing.T, s *api.Server, method string, path string, body GenericArrayPayload, headers http.Header, queryParams map[string]string) *httptest.ResponseRecorder { + t.Helper() + + if body == nil { + return PerformRequestWithRawBody(t, s, method, path, nil, headers, queryParams) + } + + return PerformRequestWithRawBody(t, s, method, path, body.Reader(t), headers, queryParams) +} + +func PerformRequestWithRawBody(t *testing.T, s *api.Server, method string, path string, body io.Reader, headers http.Header, queryParams map[string]string) *httptest.ResponseRecorder { + t.Helper() + + req := httptest.NewRequest(method, path, body) + + if headers != nil { + req.Header = headers + } + if body != nil && len(req.Header.Get(echo.HeaderContentType)) == 0 { + req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON) + } + + if queryParams != nil { + q := req.URL.Query() + for k, v := range queryParams { + q.Add(k, v) + } + + req.URL.RawQuery = q.Encode() + } + + res := httptest.NewRecorder() + + s.Echo.ServeHTTP(res, req) + + return res +} + +func PerformRequest(t *testing.T, s *api.Server, method string, path string, body GenericPayload, headers http.Header) *httptest.ResponseRecorder { + t.Helper() + + return PerformRequestWithParams(t, s, method, path, body, headers, nil) +} + +func PerformRequestWithArray(t *testing.T, s *api.Server, method string, path string, body GenericArrayPayload, headers http.Header) *httptest.ResponseRecorder { + t.Helper() + + return PerformRequestWithArrayAndParams(t, s, method, path, body, headers, nil) +} + +func ParseResponseBody(t *testing.T, res *httptest.ResponseRecorder, v interface{}) { + t.Helper() + + if err := json.NewDecoder(res.Result().Body).Decode(&v); err != nil { + t.Fatalf("Failed to parse response body: %v", err) + } +} + +// func ParseResponseAndValidate(t *testing.T, res *httptest.ResponseRecorder, v runtime.Validatable) { +// t.Helper() + +// 
ParseResponseBody(t, res, &v) + +// if err := v.Validate(strfmt.Default); err != nil { +// t.Fatalf("Failed to validate response: %v", err) +// } +// } + +func HeadersWithAuth(t *testing.T, token string) http.Header { + t.Helper() + + return HeadersWithConfigurableAuth(t, "Bearer", token) +} + +func HeadersWithConfigurableAuth(t *testing.T, scheme string, token string) http.Header { + t.Helper() + + headers := http.Header{} + headers.Set(echo.HeaderAuthorization, fmt.Sprintf("%s %s", scheme, token)) + + return headers +} diff --git a/internal/test/test_server.go b/internal/test/test_server.go new file mode 100644 index 0000000..45c108c --- /dev/null +++ b/internal/test/test_server.go @@ -0,0 +1,62 @@ +package test + +import ( + "context" + "testing" + + "github.com/allaboutapps/integresql/internal/api" + "github.com/allaboutapps/integresql/internal/router" +) + +// WithTestServer returns a fully configured server (using the default server config). +func WithTestServer(t *testing.T, closure func(s *api.Server)) { + t.Helper() + defaultConfig := api.DefaultServerConfigFromEnv() + WithTestServerConfigurable(t, defaultConfig, closure) +} + +// WithTestServerConfigurable returns a fully configured server, allowing for configuration using the provided server config. +func WithTestServerConfigurable(t *testing.T, config api.ServerConfig, closure func(s *api.Server)) { + t.Helper() + ctx := context.Background() + WithTestServerConfigurableContext(ctx, t, config, closure) +} + +// WithTestServerConfigurableContext returns a fully configured server, allowing for configuration using the provided server config. +// The provided context will be used during setup (instead of the default background context). 
+func WithTestServerConfigurableContext(ctx context.Context, t *testing.T, config api.ServerConfig, closure func(s *api.Server)) { + t.Helper() + execClosureNewTestServer(ctx, t, config, closure) + +} + +// Executes closure on a new test server +func execClosureNewTestServer(ctx context.Context, t *testing.T, config api.ServerConfig, closure func(s *api.Server)) { + t.Helper() + + // https://stackoverflow.com/questions/43424787/how-to-use-next-available-port-in-http-listenandserve + // You may use port 0 to indicate you're not specifying an exact port but you want a free, available port selected by the system + config.Address = ":0" + + s := api.NewServer(config) + + if err := s.InitManager(ctx); err != nil { + t.Fatalf("failed to start manager: %v", err) + } + + router.Init(s) + + closure(s) + + // echo is managed and should close automatically after running the test + if err := s.Echo.Shutdown(ctx); err != nil { + t.Fatalf("failed to shutdown server: %v", err) + } + + if err := s.Manager.Disconnect(ctx, true); err != nil { + t.Fatalf("failed to shutdown manager: %v", err) + } + + // disallow any further refs to managed object after running the test + s = nil +} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 24f6218..67f13f0 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -3,6 +3,7 @@ package manager import ( "context" "database/sql" + "encoding/json" "errors" "fmt" "runtime/trace" @@ -11,7 +12,10 @@ import ( "github.com/allaboutapps/integresql/pkg/db" "github.com/allaboutapps/integresql/pkg/pool" "github.com/allaboutapps/integresql/pkg/templates" + "github.com/allaboutapps/integresql/pkg/util" "github.com/lib/pq" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" ) var ( @@ -64,6 +68,15 @@ func New(config ManagerConfig) (*Manager, ManagerConfig) { config.PoolConfig.MaxParallelTasks = 1 } + // debug log final derived config + c, err := json.Marshal(config) + + if err != nil { + 
log.Fatal().Err(err).Msg("Failed to marshal the env") + } + + log.Debug().RawJSON("config", c).Msg("manager.New") + m := &Manager{ config: config, db: nil, @@ -80,38 +93,55 @@ func DefaultFromEnv() *Manager { } func (m *Manager) Connect(ctx context.Context) error { + + log := m.getManagerLogger(ctx, "Connect") + if m.db != nil { - return errors.New("manager is already connected") + err := errors.New("manager is already connected") + log.Error().Err(err) + return err } db, err := sql.Open("postgres", m.config.ManagerDatabaseConfig.ConnectionString()) if err != nil { + log.Error().Err(err).Msg("unable to connect") return err } if err := db.PingContext(ctx); err != nil { + log.Error().Err(err).Msg("unable to ping") return err } m.db = db + log.Debug().Msg("connected.") + return nil } -func (m *Manager) Disconnect(_ context.Context, ignoreCloseError bool) error { +func (m *Manager) Disconnect(ctx context.Context, ignoreCloseError bool) error { + + log := m.getManagerLogger(ctx, "Disconnect").With().Bool("ignoreCloseError", ignoreCloseError).Logger() + if m.db == nil { - return errors.New("manager is not connected") + err := errors.New("manager is not connected") + log.Error().Err(err) + return err } // stop the pool before closing DB connection m.pool.Stop() if err := m.db.Close(); err != nil && !ignoreCloseError { + log.Error().Err(err) return err } m.db = nil + log.Warn().Msg("disconnected.") + return nil } @@ -128,37 +158,53 @@ func (m Manager) Ready() bool { } func (m *Manager) Initialize(ctx context.Context) error { + + log := m.getManagerLogger(ctx, "Initialize") + if !m.Ready() { if err := m.Connect(ctx); err != nil { + log.Error().Err(err) return err } } rows, err := m.db.QueryContext(ctx, "SELECT datname FROM pg_database WHERE datname LIKE $1", fmt.Sprintf("%s_%s_%%", m.config.DatabasePrefix, m.config.PoolConfig.TestDBNamePrefix)) if err != nil { + log.Error().Err(err) return err } defer rows.Close() + log.Debug().Msg("Dropping unmanaged dbs...") + for 
rows.Next() { var dbName string if err := rows.Scan(&dbName); err != nil { return err } + log.Warn().Str("dbName", dbName).Msg("Dropping...") + if _, err := m.db.Exec(fmt.Sprintf("DROP DATABASE %s", pq.QuoteIdentifier(dbName))); err != nil { + log.Error().Str("dbName", dbName).Err(err) return err } } + log.Info().Msg("initialized.") + return nil } func (m Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "initialize_template_db") + + log := m.getManagerLogger(ctx, "InitializeTemplateDatabase").With().Str("hash", hash).Logger() + defer task.End() if !m.Ready() { + log.Error().Msg("not ready") return db.TemplateDatabase{}, ErrManagerNotReady } @@ -183,6 +229,8 @@ func (m Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (d reg := trace.StartRegion(ctx, "drop_and_create_db") if err := m.dropAndCreateDatabase(ctx, dbName, m.config.ManagerDatabaseConfig.Username, m.config.TemplateDatabaseTemplate); err != nil { + + log.Error().Err(err).Msg("triggering unsafe remove after dropAndCreateDatabase failed...") m.templates.RemoveUnsafe(ctx, hash) return db.TemplateDatabase{}, err @@ -192,6 +240,8 @@ func (m Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (d // if template config has been overwritten, the existing pool needs to be removed err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB) if err != nil && !errors.Is(err, pool.ErrUnknownHash) { + + log.Error().Err(err).Msg("triggering unsafe remove after RemoveAllWithHash failed...") m.templates.RemoveUnsafe(ctx, hash) return db.TemplateDatabase{}, err @@ -208,14 +258,18 @@ func (m Manager) InitializeTemplateDatabase(ctx context.Context, hash string) (d func (m Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error { ctx, task := trace.NewTask(ctx, "discard_template_db") + log := m.getManagerLogger(ctx, "DiscardTemplateDatabase").With().Str("hash", hash).Logger() + defer 
task.End() if !m.Ready() { + log.Error().Msg("not ready") return ErrManagerNotReady } // first remove all DB with this hash if err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB); err != nil && !errors.Is(err, pool.ErrUnknownHash) { + log.Error().Err(err).Msg("remove all err") return err } @@ -224,6 +278,9 @@ func (m Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error if !found { // even if a template is not found in the collection, it might still exist in the DB + + log.Warn().Msg("template not found, checking for existance...") + dbName = m.makeTemplateDatabaseName(hash) exists, err := m.checkDatabaseExists(ctx, dbName) if err != nil { @@ -237,19 +294,26 @@ func (m Manager) DiscardTemplateDatabase(ctx context.Context, hash string) error template.SetState(ctx, templates.TemplateStateDiscarded) } + log.Debug().Msg("found template database, dropping...") + return m.dropDatabase(ctx, dbName) } func (m Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db.TemplateDatabase, error) { ctx, task := trace.NewTask(ctx, "finalize_template_db") + + log := m.getManagerLogger(ctx, "FinalizeTemplateDatabase").With().Str("hash", hash).Logger() + defer task.End() if !m.Ready() { + log.Error().Msg("not ready") return db.TemplateDatabase{}, ErrManagerNotReady } template, found := m.templates.Get(ctx, hash) if !found { + log.Error().Msg("bailout: template not found") return db.TemplateDatabase{}, ErrTemplateNotFound } @@ -258,28 +322,36 @@ func (m Manager) FinalizeTemplateDatabase(ctx context.Context, hash string) (db. 
// early bailout if we are already ready (multiple calls) if state == templates.TemplateStateFinalized { + log.Warn().Msg("bailout: template already finalized") return db.TemplateDatabase{Database: template.Database}, ErrTemplateAlreadyInitialized } // Disallow transition from discarded to ready if state == templates.TemplateStateDiscarded { + log.Error().Msg("bailout: template discarded!") return db.TemplateDatabase{}, ErrTemplateDiscarded } // Init a pool with this hash + log.Trace().Msg("init hash pool...") m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB) lockedTemplate.SetState(ctx, templates.TemplateStateFinalized) + log.Debug().Msg("Template database finalized successfully.") return db.TemplateDatabase{Database: template.Database}, nil } // GetTestDatabase tries to get a ready test DB from an existing pool. func (m Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatabase, error) { ctx, task := trace.NewTask(ctx, "get_test_db") + + log := m.getManagerLogger(ctx, "GetTestDatabase").With().Str("hash", hash).Logger() + defer task.End() if !m.Ready() { + log.Error().Msg("not ready") return db.TestDatabase{}, ErrManagerNotReady } @@ -302,6 +374,7 @@ func (m Manager) GetTestDatabase(ctx context.Context, hash string) (db.TestDatab // Template exists, but the pool is not there - // it must have been removed. // It needs to be reinitialized. 
+ log.Warn().Err(err).Msg("ErrUnknownHash, going to InitHashPool and recursively calling us again...") m.pool.InitHashPool(ctx, template.Database, m.recreateTestPoolDB) testDB, err = m.pool.GetTestDatabase(ctx, template.TemplateHash, m.config.TestDatabaseGetTimeout) @@ -336,14 +409,7 @@ func (m Manager) ReturnTestDatabase(ctx context.Context, hash string, id int) er } // template is ready, we can return unchanged testDB to the pool - if err := m.pool.ReturnTestDatabase(ctx, hash, id); err != nil { - - fmt.Printf("manager.ReturnTestDatabase error: %v\n", err) - - return err - } - - return nil + return m.pool.ReturnTestDatabase(ctx, hash, id) } // RecreateTestDatabase recreates the test DB according to the template and returns it back to the pool. @@ -367,21 +433,20 @@ func (m *Manager) RecreateTestDatabase(ctx context.Context, hash string, id int) } // template is ready, we can return the testDB to the pool and have it cleaned up - if err := m.pool.RecreateTestDatabase(ctx, hash, id); err != nil { - - fmt.Printf("manager.RecreateTestDatabase error: %v\n", err) - - return err - } - - return nil + return m.pool.RecreateTestDatabase(ctx, hash, id) } func (m Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) error { + + log := m.getManagerLogger(ctx, "ClearTrackedTestDatabases").With().Str("hash", hash).Logger() + if !m.Ready() { + log.Error().Msg("not ready") return ErrManagerNotReady } + log.Warn().Msg("clearing...") + err := m.pool.RemoveAllWithHash(ctx, hash, m.dropTestPoolDB) if errors.Is(err, pool.ErrUnknownHash) { return ErrTemplateNotFound @@ -391,10 +456,16 @@ func (m Manager) ClearTrackedTestDatabases(ctx context.Context, hash string) err } func (m Manager) ResetAllTracking(ctx context.Context) error { + + log := m.getManagerLogger(ctx, "ResetAllTracking") + if !m.Ready() { + log.Error().Msg("not ready") return ErrManagerNotReady } + log.Warn().Msg("resetting...") + // remove all templates to disallow any new test DB creation from existing 
templates m.templates.RemoveAll(ctx) @@ -404,7 +475,8 @@ func (m Manager) ResetAllTracking(ctx context.Context) error { func (m Manager) checkDatabaseExists(ctx context.Context, dbName string) (bool, error) { var exists bool - // fmt.Printf("SELECT 1 AS exists FROM pg_database WHERE datname = %s\n", dbName) + log := m.getManagerLogger(ctx, "checkDatabaseExists") + log.Trace().Msgf("SELECT 1 AS exists FROM pg_database WHERE datname = %s\n", dbName) if err := m.db.QueryRowContext(ctx, "SELECT 1 AS exists FROM pg_database WHERE datname = $1", dbName).Scan(&exists); err != nil { if err == sql.ErrNoRows { @@ -440,7 +512,8 @@ func (m Manager) createDatabase(ctx context.Context, dbName string, owner string defer trace.StartRegion(ctx, "create_db").End() - // fmt.Printf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s\n", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template)) + log := m.getManagerLogger(ctx, "createDatabase") + log.Trace().Msgf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s\n", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template)) if _, err := m.db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s WITH OWNER %s TEMPLATE %s", pq.QuoteIdentifier(dbName), pq.QuoteIdentifier(owner), pq.QuoteIdentifier(template))); err != nil { return err @@ -472,7 +545,8 @@ func (m Manager) dropDatabase(ctx context.Context, dbName string) error { defer trace.StartRegion(ctx, "drop_db").End() - // fmt.Printf("DROP DATABASE IF EXISTS %s\n", pq.QuoteIdentifier(dbName)) + log := m.getManagerLogger(ctx, "dropDatabase") + log.Trace().Msgf("DROP DATABASE IF EXISTS %s\n", pq.QuoteIdentifier(dbName)) if _, err := m.db.ExecContext(ctx, fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(dbName))); err != nil { if strings.Contains(err.Error(), "is being accessed by other users") { @@ -500,3 +574,7 @@ func (m Manager) dropAndCreateDatabase(ctx context.Context, dbName string, owner func (m Manager) 
makeTemplateDatabaseName(hash string) string { return fmt.Sprintf("%s_%s_%s", m.config.DatabasePrefix, m.config.TemplateDatabasePrefix, hash) } + +func (m Manager) getManagerLogger(ctx context.Context, managerFunction string) zerolog.Logger { + return util.LogFromContext(ctx).With().Str("managerFn", managerFunction).Logger() +} diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index 24c56e7..d59f000 100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -11,13 +11,13 @@ import ( // we explicitly want to access this struct via manager.ManagerConfig, thus we disable revive for the next line type ManagerConfig struct { //nolint:revive - ManagerDatabaseConfig db.DatabaseConfig + ManagerDatabaseConfig db.DatabaseConfig `json:"-"` // sensitive TemplateDatabaseTemplate string DatabasePrefix string TemplateDatabasePrefix string TestDatabaseOwner string - TestDatabaseOwnerPassword string + TestDatabaseOwnerPassword string `json:"-"` // sensitive TemplateFinalizeTimeout time.Duration // Time to wait for a template to transition into the 'finalized' state TestDatabaseGetTimeout time.Duration // Time to wait for a ready database diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index a2488cf..45f2a44 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -509,7 +509,7 @@ func TestManagerDiscardTemplateDatabase(t *testing.T) { if err == nil { success++ } else { - // fmt.Println(err) + // t.Log(err) errored++ } } @@ -577,7 +577,7 @@ func TestManagerDiscardThenReinitializeTemplateDatabase(t *testing.T) { if err == nil { success++ } else { - // fmt.Println(err) + t.Log(err) errored++ } } diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index a7a16c7..869e581 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -3,12 +3,13 @@ package pool import ( "context" "errors" - "fmt" "runtime/trace" "sync" "time" "github.com/allaboutapps/integresql/pkg/db" + 
"github.com/allaboutapps/integresql/pkg/util" + "github.com/rs/zerolog" ) var ( @@ -27,8 +28,6 @@ const ( dbStateRecreating // In the process of being recreated (to prevent concurrent cleans) ) -const minConcurrentTasksNum = 1 - type existingDB struct { state dbState db.TestDatabase @@ -75,10 +74,6 @@ type HashPool struct { // Starts the workers to extend the pool in background up to requested inital number. func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFunc) *HashPool { - if cfg.MaxParallelTasks < minConcurrentTasksNum { - cfg.MaxParallelTasks = minConcurrentTasksNum - } - pool := &HashPool{ dbs: make([]existingDB, 0, cfg.MaxPoolSize), ready: make(chan int, cfg.MaxPoolSize), @@ -97,10 +92,15 @@ func NewHashPool(cfg PoolConfig, templateDB db.Database, initDBFunc RecreateDBFu } func (pool *HashPool) Start() { + + log := pool.getPoolLogger(context.Background(), "Start") pool.Lock() + log.Debug().Msg("starting...") + defer pool.Unlock() if pool.running { + log.Warn().Msg("bailout already running!") return } @@ -118,11 +118,18 @@ func (pool *HashPool) Start() { defer pool.wg.Done() pool.controlLoop(ctx, cancel) }() + + log.Info().Msg("started!") } func (pool *HashPool) Stop() { + + log := pool.getPoolLogger(context.Background(), "Stop") + log.Debug().Msg("stopping...") + pool.Lock() if !pool.running { + log.Warn().Msg("bailout already stopped!") return } pool.running = false @@ -131,24 +138,29 @@ func (pool *HashPool) Stop() { pool.tasksChan <- workerTaskStop pool.wg.Wait() pool.workerContext = nil + log.Warn().Msg("stopped!") } -func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout time.Duration) (db db.TestDatabase, err error) { +func (pool *HashPool) GetTestDatabase(ctx context.Context, timeout time.Duration) (db db.TestDatabase, err error) { var index int - fmt.Printf("pool#%s: waiting for ready ID...\n", hash) + log := pool.getPoolLogger(ctx, "GetTestDatabase") + log.Trace().Msg("waiting for ready 
ID...") select { case <-time.After(timeout): err = ErrTimeout + log.Error().Err(err).Dur("timeout", timeout).Msg("timeout") return case <-ctx.Done(): err = ctx.Err() + log.Warn().Err(err).Msg("ctx done") return case index = <-pool.ready: } - fmt.Printf("pool#%s: got ready ID=%v\n", hash, index) + log = log.With().Int("id", index).Logger() + log.Trace().Msg("got ready testdatabase!") reg := trace.StartRegion(ctx, "wait_for_lock_hash_pool") pool.Lock() @@ -158,16 +170,15 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout // sanity check, should never happen if index < 0 || index >= len(pool.dbs) { err = ErrInvalidIndex + log.Error().Err(err).Int("dbs", len(pool.dbs)).Msg("index out of bounds!") return } testDB := pool.dbs[index] // sanity check, should never happen - we got this index from 'ready' channel if testDB.state != dbStateReady { - - fmt.Printf("pool#%s: GetTestDatabase ErrInvalidState ID=%v\n", hash, index) - err = ErrInvalidState + log.Error().Err(err).Msgf("testdatabase is not in ready state=%v!", testDB.state) return } @@ -179,24 +190,26 @@ func (pool *HashPool) GetTestDatabase(ctx context.Context, hash string, timeout pool.dirty <- index if len(pool.dbs) < pool.PoolConfig.MaxPoolSize { - fmt.Printf("pool#%s: Conditional extend\n", hash) + log.Trace().Msg("push workerTaskExtend") pool.tasksChan <- workerTaskExtend } // we try to ensure that InitialPoolSize count is staying ready // thus, we try to move the oldest dirty dbs into recreating with the workerTaskAutoCleanDirty if len(pool.dbs) >= pool.PoolConfig.MaxPoolSize && (len(pool.ready)+len(pool.recreating)) < pool.InitialPoolSize { + log.Trace().Msg("push workerTaskAutoCleanDirty") pool.tasksChan <- workerTaskAutoCleanDirty } - fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (GetTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), 
pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + pool.unsafeTraceLogStats(log) return testDB.TestDatabase, nil } func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan workerTask, MaxParallelTasks int) { - fmt.Printf("pool#%s: workerTaskLoop\n", pool.templateDB.TemplateHash) + log := pool.getPoolLogger(ctx, "workerTaskLoop") + log.Debug().Msg("starting...") handlers := map[workerTask]func(ctx context.Context) error{ workerTaskExtend: ignoreErrs(pool.extend, ErrPoolFull, context.Canceled), @@ -209,12 +222,13 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan worker for task := range taskChan { handler, ok := handlers[task] if !ok { - fmt.Printf("invalid task: %s", task) + log.Error().Msgf("invalid task: %s", task) continue } select { case <-ctx.Done(): + log.Warn().Err(ctx.Err()).Msg("ctx done!") return case semaphore <- struct{}{}: } @@ -227,10 +241,10 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan worker <-semaphore }() - fmt.Printf("pool#%s: workerTaskLoop task=%v\n", pool.templateDB.TemplateHash, task) + log.Debug().Msgf("task=%v", task) if err := handler(ctx); err != nil { - fmt.Printf("pool#%s: workerTaskLoop task=%v FAILED! 
err=%v\n", pool.templateDB.TemplateHash, task, err.Error()) + log.Error().Err(err).Msgf("task=%v FAILED!", task) } }(task) @@ -239,9 +253,9 @@ func (pool *HashPool) workerTaskLoop(ctx context.Context, taskChan <-chan worker func (pool *HashPool) controlLoop(ctx context.Context, cancel context.CancelFunc) { - fmt.Printf("pool#%s: controlLoop\n", pool.templateDB.TemplateHash) + log := pool.getPoolLogger(ctx, "controlLoop") + log.Debug().Msg("starting...") - // ctx, cancel := context.WithCancel(context.Background()) defer cancel() workerTasksChan := make(chan workerTask, len(pool.tasksChan)) @@ -253,6 +267,7 @@ func (pool *HashPool) controlLoop(ctx context.Context, cancel context.CancelFunc for task := range pool.tasksChan { if task == workerTaskStop { + log.Debug().Msg("stopping...") close(workerTasksChan) cancel() return @@ -269,21 +284,28 @@ func (pool *HashPool) controlLoop(ctx context.Context, cancel context.CancelFunc // ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). 
func (pool *HashPool) ReturnTestDatabase(ctx context.Context, id int) error { + + log := pool.getPoolLogger(ctx, "ReturnTestDatabase").With().Int("id", id).Logger() + log.Debug().Msg("returning...") + pool.Lock() defer pool.Unlock() - if ctx.Err() != nil { + if err := ctx.Err(); err != nil { // client vanished - return ctx.Err() + log.Warn().Err(err).Msg("bailout client vanished!") + return err } if id < 0 || id >= len(pool.dbs) { + log.Warn().Int("dbs", len(pool.dbs)).Msg("bailout invalid index!") return ErrInvalidIndex } // check if db is in the correct state testDB := pool.dbs[id] if testDB.state != dbStateDirty { + log.Warn().Int("dbs", len(pool.dbs)).Msgf("bailout invalid state=%v.", testDB.state) return nil } @@ -292,28 +314,28 @@ func (pool *HashPool) ReturnTestDatabase(ctx context.Context, id int) error { pool.dbs[id] = testDB // remove id from dirty and add it to ready channel - pool.excludeIDFromDirtyChannel(id) + pool.excludeIDFromChannel(pool.dirty, id) pool.ready <- id - fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (ReturnTestDatabase)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) + pool.unsafeTraceLogStats(log) return nil } -func (pool *HashPool) excludeIDFromDirtyChannel(id int) { +func (pool *HashPool) excludeIDFromChannel(ch chan int, excludeID int) { - // The testDB identified by overgiven id may still in the dirty channel. We want to exclude it. + // The testDB identified by the given id may still be in a specific channel (typically dirty). We want to exclude it. // We need to explicitly remove it from there by filtering the current channel to a tmp channel. - // We finally close the tmp channel and flush it onto the dirty channel again.
// The id is now no longer in the channel. - filteredDirty := make(chan int, pool.MaxPoolSize) + filtered := make(chan int, pool.MaxPoolSize) - var dirtyID int + var id int for loop := true; loop; { select { - case dirtyID = <-pool.dirty: - if dirtyID != id { - filteredDirty <- dirtyID + case id = <-ch: + if id != excludeID { + filtered <- id } default: loop = false @@ -321,55 +343,66 @@ func (pool *HashPool) excludeIDFromDirtyChannel(id int) { } } - // filteredDirty now has all filtered values without the above id, redirect the other ids back to the dirty channel. + // filtered now has all filtered values without the above id, redirect the other ids back to the specific channel. // close so we can range over it... - close(filteredDirty) + close(filtered) - for dirtyID := range filteredDirty { - pool.dirty <- dirtyID + for id := range filtered { + ch <- id } } // RecreateTestDatabase prioritizes the test DB to be recreated next via the dirty worker. func (pool *HashPool) RecreateTestDatabase(ctx context.Context, id int) error { + log := pool.getPoolLogger(ctx, "RecreateTestDatabase").With().Int("id", id).Logger() + log.Debug().Msg("flag testdatabase for recreation...") + pool.RLock() + if id < 0 || id >= len(pool.dbs) { + log.Warn().Int("dbs", len(pool.dbs)).Msg("bailout invalid index!") pool.RUnlock() return ErrInvalidIndex } - fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (RecreateTestDatabase %v)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize, id) pool.RUnlock() - if ctx.Err() != nil { + if err := ctx.Err(); err != nil { // client vanished - return ctx.Err() + log.Warn().Err(err).Msg("bailout client vanished!") + return err } // exclude from the normal dirty channel, force recreation in a background worker... 
- pool.excludeIDFromDirtyChannel(id) + pool.excludeIDFromChannel(pool.dirty, id) // directly spawn a new worker in the bg (with the same ctx as the typical workers) // note that this runs unchained, meaning we do not care about errors that may happen via this bg task //nolint:errcheck go pool.recreateDatabaseGracefully(pool.workerContext, id) + pool.unsafeTraceLogStats(log) return nil } // recreateDatabaseGracefully continuosly tries to recreate the testdatabase and will retry/block until it succeeds func (pool *HashPool) recreateDatabaseGracefully(ctx context.Context, id int) error { - if ctx.Err() != nil { + log := pool.getPoolLogger(ctx, "recreateDatabaseGracefully").With().Int("id", id).Logger() + log.Debug().Msg("recreating...") + + if err := ctx.Err(); err != nil { // pool closed in the meantime. - return ctx.Err() + log.Error().Err(err).Msg("bailout pre locking ctx err") + return err } pool.Lock() - if pool.dbs[id].state != dbStateDirty { + if state := pool.dbs[id].state; state != dbStateDirty { // nothing to do + log.Error().Msgf("bailout not dbStateDirty state=%v", state) pool.Unlock() return nil } @@ -397,7 +430,7 @@ func (pool *HashPool) recreateDatabaseGracefully(ctx context.Context, id int) er default: try++ - fmt.Printf("recreateDatabaseGracefully: recreating ID='%v' try=%v...\n", id, try) + log.Trace().Int("try", try).Msg("trying to recreate...") err := pool.recreateDB(ctx, &testDB) if err != nil { // only still connected errors are worthy a retry @@ -408,10 +441,11 @@ func (pool *HashPool) recreateDatabaseGracefully(ctx context.Context, id int) er backoff = pool.PoolConfig.TestDatabaseRetryRecreateSleepMax } - fmt.Printf("recreateDatabaseGracefully: DB is still in use, will retry ID='%v' try=%v in backoff=%v.\n", id, try, backoff) + log.Warn().Int("try", try).Dur("backoff", backoff).Msg("DB is still in use, will retry...") time.Sleep(backoff) } else { - fmt.Printf("recreateDatabaseGracefully: bailout worker task DB error while cleanup ID='%v' try=%v 
err=%v\n", id, try, err) + + log.Error().Int("try", try).Err(err).Msg("bailout worker task DB error while cleanup!") return err } } else { @@ -429,11 +463,10 @@ MoveToReady: return ctx.Err() } - fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (recreateDatabaseGracefully %v)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize, id) - if pool.dbs[id].state == dbStateReady { // oups, it has been cleaned by another worker already // we won't add it to the 'ready' channel to avoid duplication + log.Warn().Msg("bailout DB has be cleaned by another worker as its already ready, skipping readd to ready channel!") return nil } @@ -443,6 +476,8 @@ MoveToReady: pool.ready <- pool.dbs[id].ID + log.Debug().Uint("generation", pool.dbs[id].generation).Msg("ready") + pool.unsafeTraceLogStats(log) return nil } @@ -451,6 +486,9 @@ MoveToReady: // Note that we generally gurantee FIFO when it comes to auto-cleaning as long as no manual unlock/recreates happen. func (pool *HashPool) autoCleanDirty(ctx context.Context) error { + log := pool.getPoolLogger(ctx, "autoCleanDirty") + log.Trace().Msg("autocleaning...") + ctx, task := trace.NewTask(ctx, "worker_clean_dirty") defer task.End() @@ -461,11 +499,13 @@ func (pool *HashPool) autoCleanDirty(ctx context.Context) error { return ctx.Err() default: // nothing to do - fmt.Println("autoCleanDirty noop") + log.Trace().Msg("noop") return nil } - fmt.Printf("pool#%s: autoCleanDirty id=%v\n", pool.templateDB.TemplateHash, id) + // got id... 
+ log = log.With().Int("id", id).Logger() + log.Trace().Msg("checking cleaning prerequisites...") regLock := trace.StartRegion(ctx, "worker_wait_for_rlock_hash_pool") pool.RLock() @@ -473,6 +513,7 @@ func (pool *HashPool) autoCleanDirty(ctx context.Context) error { if id < 0 || id >= len(pool.dbs) { // sanity check, should never happen + log.Warn().Int("dbs", len(pool.dbs)).Msg("bailout invalid index!") pool.RUnlock() return ErrInvalidIndex } @@ -480,16 +521,19 @@ func (pool *HashPool) autoCleanDirty(ctx context.Context) error { blockedUntil := time.Until(pool.dbs[id].blockAutoCleanDirtyUntil) generation := pool.dbs[id].generation + log = log.With().Dur("blockedUntil", blockedUntil).Uint("generation", generation).Logger() + pool.RUnlock() // immediately pass to pool recreate if blockedUntil <= 0 { + log.Trace().Msg("clean now (immediate)!") return pool.recreateDatabaseGracefully(ctx, id) } // else we need to wait until we are allowed to work with it! // we block auto-cleaning until we are allowed to... 
- fmt.Printf("pool#%s: autoCleanDirty id=%v sleep for blockedUntil=%v...\n", pool.templateDB.TemplateHash, id, blockedUntil) + log.Warn().Msg("sleeping before being allowed to clean...") time.Sleep(blockedUntil) // we need to check that the testDB.generation did not change since we slept @@ -497,13 +541,14 @@ func (pool *HashPool) autoCleanDirty(ctx context.Context) error { pool.RLock() if pool.dbs[id].generation != generation || pool.dbs[id].state != dbStateDirty { - fmt.Printf("pool#%s: autoCleanDirty id=%v bailout old generation=%v vs new generation=%v state=%v...\n", pool.templateDB.TemplateHash, id, generation, pool.dbs[id].generation, pool.dbs[id].state) + log.Error().Msgf("bailout old generation=%v vs new generation=%v state=%v", generation, pool.dbs[id].generation, pool.dbs[id].state) pool.RUnlock() return nil } pool.RUnlock() + log.Trace().Msg("clean now (after sleep has happenend)!") return pool.recreateDatabaseGracefully(ctx, id) } @@ -521,6 +566,9 @@ func ignoreErrs(f func(ctx context.Context) error, errs ...error) func(context.C func (pool *HashPool) extend(ctx context.Context) error { + log := pool.getPoolLogger(ctx, "extend") + log.Trace().Msg("extending...") + ctx, task := trace.NewTask(ctx, "worker_extend") defer task.End() @@ -528,12 +576,12 @@ func (pool *HashPool) extend(ctx context.Context) error { pool.Lock() defer pool.Unlock() - fmt.Printf("pool#%s: ready=%d, dirty=%d, recreating=%d, tasksChan=%d, dbs=%d initial=%d max=%d (extend)\n", pool.templateDB.TemplateHash, len(pool.ready), len(pool.dirty), len(pool.recreating), len(pool.tasksChan), len(pool.dbs), pool.PoolConfig.InitialPoolSize, pool.PoolConfig.MaxPoolSize) reg.End() // get index of a next test DB - its ID index := len(pool.dbs) if index == cap(pool.dbs) { + log.Error().Int("dbs", len(pool.dbs)).Int("cap", cap(pool.dbs)).Err(ErrPoolFull).Msg("pool is full") return ErrPoolFull } @@ -551,6 +599,8 @@ func (pool *HashPool) extend(ctx context.Context) error { // set DB name 
newTestDB.Database.Config.Database = makeDBName(pool.TestDBNamePrefix, pool.templateDB.TemplateHash, index) + log.Trace().Int("id", index).Msg("adding...") + reg = trace.StartRegion(ctx, "worker_db_operation") err := pool.recreateDB(ctx, &newTestDB) reg.End() @@ -564,18 +614,25 @@ func (pool *HashPool) extend(ctx context.Context) error { pool.ready <- newTestDB.ID + pool.unsafeTraceLogStats(log) + return nil } func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { + log := pool.getPoolLogger(ctx, "RemoveAll") + // stop all workers pool.Stop() + // wait until all current "recreating" tasks are finished... + pool.Lock() defer pool.Unlock() if len(pool.dbs) == 0 { + log.Error().Msg("bailout no dbs.") return nil } @@ -584,17 +641,33 @@ func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) er testDB := pool.dbs[id].TestDatabase if err := removeFunc(ctx, testDB); err != nil { + log.Error().Int("id", id).Err(err).Msg("removeFunc testdatabase err") return err } if len(pool.dbs) > 1 { pool.dbs = pool.dbs[:len(pool.dbs)-1] } + + pool.excludeIDFromChannel(pool.dirty, id) + pool.excludeIDFromChannel(pool.ready, id) + log.Debug().Int("id", id).Msg("testdatabase removed!") } // close all only if removal of all succeeded pool.dbs = nil close(pool.tasksChan) + pool.unsafeTraceLogStats(log) + return nil } + +func (pool *HashPool) getPoolLogger(ctx context.Context, poolFunction string) zerolog.Logger { + return util.LogFromContext(ctx).With().Str("poolHash", pool.templateDB.TemplateHash).Str("poolFn", poolFunction).Logger() +} + +// unsafeTraceLogStats logs stats of this pool. Attention: pool should be read or write locked! 
+func (pool *HashPool) unsafeTraceLogStats(log zerolog.Logger) { + log.Trace().Int("ready", len(pool.ready)).Int("dirty", len(pool.dirty)).Int("recreating", len(pool.recreating)).Int("tasksChan", len(pool.tasksChan)).Int("dbs", len(pool.dbs)).Int("initial", pool.PoolConfig.InitialPoolSize).Int("max", pool.PoolConfig.MaxPoolSize).Msg("pool stats") +} diff --git a/pkg/pool/pool_collection.go b/pkg/pool/pool_collection.go index d4ccd46..82db735 100644 --- a/pkg/pool/pool_collection.go +++ b/pkg/pool/pool_collection.go @@ -105,7 +105,7 @@ func (p *PoolCollection) GetTestDatabase(ctx context.Context, hash string, timeo return db, err } - return pool.GetTestDatabase(ctx, hash, timeout) + return pool.GetTestDatabase(ctx, timeout) } // ReturnTestDatabase returns the given test DB directly to the pool, without cleaning (recreating it). diff --git a/pkg/util/context.go b/pkg/util/context.go new file mode 100644 index 0000000..00ce454 --- /dev/null +++ b/pkg/util/context.go @@ -0,0 +1,58 @@ +package util + +import ( + "context" + "errors" +) + +type contextKey string + +const ( + CTXKeyUser contextKey = "user" + CTXKeyAccessToken contextKey = "access_token" + CTXKeyRequestID contextKey = "request_id" + CTXKeyDisableLogger contextKey = "disable_logger" + CTXKeyCacheControl contextKey = "cache_control" +) + +// RequestIDFromContext returns the ID of the (HTTP) request, returning an error if it is not present. +func RequestIDFromContext(ctx context.Context) (string, error) { + val := ctx.Value(CTXKeyRequestID) + if val == nil { + return "", errors.New("No request ID present in context") + } + + id, ok := val.(string) + if !ok { + return "", errors.New("Request ID in context is not a string") + } + + return id, nil +} + +// ShouldDisableLogger checks whether the logger instance should be disabled for the provided context. 
+// `util.LogFromContext` will use this function to check whether it should return a default logger if +// none has been set by our logging middleware before, or fall back to the disabled logger, suppressing +// all output. Use `ctx = util.DisableLogger(ctx, true)` to disable logging for the given context. +func ShouldDisableLogger(ctx context.Context) bool { + s := ctx.Value(CTXKeyDisableLogger) + if s == nil { + return false + } + + shouldDisable, ok := s.(bool) + if !ok { + return false + } + + return shouldDisable +} + +// DisableLogger toggles the indication whether `util.LogFromContext` should return a disabled logger +// for a context if none has been set by our logging middleware before. Whilst the use cases for a disabled +// logger are relatively minimal (we almost always want to have some log output, even if the context +// was not directly derived from an HTTP request), this functionality was provided so you can switch back +// to the old zerolog behavior if so desired. +func DisableLogger(ctx context.Context, shouldDisable bool) context.Context { + return context.WithValue(ctx, CTXKeyDisableLogger, shouldDisable) +} diff --git a/pkg/util/log.go b/pkg/util/log.go new file mode 100644 index 0000000..c1f4ceb --- /dev/null +++ b/pkg/util/log.go @@ -0,0 +1,42 @@ +package util + +import ( + "context" + + "github.com/labstack/echo/v4" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +// LogFromContext returns a request-specific zerolog instance using the provided context. +// The returned logger will have the request ID as well as some other value predefined. +// If no logger is associated with the context provided, the global zerolog instance +// will be returned instead - this function will _always_ return a valid (enabled) logger. +// Should you ever need to force a disabled logger for a context, use `util.DisableLogger(ctx, true)` +// and pass the context returned to other code/`LogFromContext`.
+func LogFromContext(ctx context.Context) *zerolog.Logger { + l := log.Ctx(ctx) + if l.GetLevel() == zerolog.Disabled { + if ShouldDisableLogger(ctx) { + return l + } + l = &log.Logger + } + return l +} + +// LogFromEchoContext returns a request-specific zerolog instance using the echo.Context of the request. +// The returned logger will have the request ID as well as some other value predefined. +func LogFromEchoContext(c echo.Context) *zerolog.Logger { + return LogFromContext(c.Request().Context()) +} + +func LogLevelFromString(s string) zerolog.Level { + l, err := zerolog.ParseLevel(s) + if err != nil { + log.Error().Err(err).Msgf("Failed to parse log level, defaulting to %s", zerolog.DebugLevel) + return zerolog.DebugLevel + } + + return l +} diff --git a/pkg/util/log_test.go b/pkg/util/log_test.go new file mode 100644 index 0000000..c318b74 --- /dev/null +++ b/pkg/util/log_test.go @@ -0,0 +1,20 @@ +package util_test + +import ( + "testing" + + "github.com/allaboutapps/integresql/pkg/util" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" +) + +func TestLogLevelFromString(t *testing.T) { + res := util.LogLevelFromString("panic") + assert.Equal(t, zerolog.PanicLevel, res) + + res = util.LogLevelFromString("warn") + assert.Equal(t, zerolog.WarnLevel, res) + + res = util.LogLevelFromString("foo") + assert.Equal(t, zerolog.DebugLevel, res) +} From 7b99232ec350445edf06f1b85dc650f32b32616d Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 5 Sep 2023 17:15:34 +0200 Subject: [PATCH 144/160] don't lock while extending, reuse recreateDatabaseGracefully --- pkg/manager/manager_test.go | 15 ++++++++++++--- pkg/pool/pool.go | 26 ++++++++------------------ 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 45f2a44..8765fd2 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -822,19 +822,28 @@ func TestManagerReturnTestDatabase(t 
*testing.T) { // finally return it assert.NoError(t, m.ReturnTestDatabase(ctx, hash, testDB1.ID)) + // regetting these databases is quite random. Let's try to get the same id again... // on first GET call the pool has been extended // we will get the newly created DB testDB2, err := m.GetTestDatabase(ctx, hash) assert.NoError(t, err) - assert.NotEqual(t, testDB1.ID, testDB2.ID) // next in 'ready' channel should be the returned DB testDB3, err := m.GetTestDatabase(ctx, hash) assert.NoError(t, err) - assert.Equal(t, testDB1.ID, testDB3.ID) + + // restored db + var targetConnectionString string + if testDB2.ID == testDB1.ID { + targetConnectionString = testDB2.Config.ConnectionString() + } else if testDB3.ID == testDB1.ID { + targetConnectionString = testDB3.Config.ConnectionString() + } else { + t.Fatal("We should have been able to get the previously returned database.") + } // assert that it hasn't been cleaned but just reused directly - db, err = sql.Open("postgres", testDB3.Config.ConnectionString()) + db, err = sql.Open("postgres", targetConnectionString) require.NoError(t, err) require.NoError(t, db.PingContext(ctx)) diff --git a/pkg/pool/pool.go b/pkg/pool/pool.go index 869e581..b34cae8 100644 --- a/pkg/pool/pool.go +++ b/pkg/pool/pool.go @@ -574,20 +574,19 @@ func (pool *HashPool) extend(ctx context.Context) error { reg := trace.StartRegion(ctx, "worker_wait_for_lock_hash_pool") pool.Lock() - defer pool.Unlock() - reg.End() // get index of a next test DB - its ID index := len(pool.dbs) if index == cap(pool.dbs) { log.Error().Int("dbs", len(pool.dbs)).Int("cap", cap(pool.dbs)).Err(ErrPoolFull).Msg("pool is full") + pool.Unlock() return ErrPoolFull } - // initalization of a new DB using template config + // initalization of a new DB using template config, it must start in state dirty! 
newTestDB := existingDB{ - state: dbStateReady, + state: dbStateDirty, TestDatabase: db.TestDatabase{ Database: db.Database{ TemplateHash: pool.templateDB.TemplateHash, @@ -599,24 +598,15 @@ func (pool *HashPool) extend(ctx context.Context) error { // set DB name newTestDB.Database.Config.Database = makeDBName(pool.TestDBNamePrefix, pool.templateDB.TemplateHash, index) - log.Trace().Int("id", index).Msg("adding...") - - reg = trace.StartRegion(ctx, "worker_db_operation") - err := pool.recreateDB(ctx, &newTestDB) - reg.End() - - if err != nil { - return err - } - - // add new test DB to the pool + // add new test DB to the pool (currently it's dirty!) pool.dbs = append(pool.dbs, newTestDB) - pool.ready <- newTestDB.ID - + log.Trace().Int("id", index).Msg("appended as dirty, recreating...") pool.unsafeTraceLogStats(log) + pool.Unlock() - return nil + // forced recreate... + return pool.recreateDatabaseGracefully(ctx, index) } func (pool *HashPool) RemoveAll(ctx context.Context, removeFunc RemoveDBFunc) error { From a17f2459189ed7df5aa3f71a04acad59a5361cf4 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 8 Jan 2024 15:04:55 +0100 Subject: [PATCH 145/160] bump deps --- Dockerfile | 14 +++---- README.md | 9 ++++- go.mod | 26 ++++++------- go.sum | 77 ++++++++++++++++++-------------------- tests/testclient/client.go | 5 ++- 5 files changed, 67 insertions(+), 64 deletions(-) diff --git a/Dockerfile b/Dockerfile index b7d3416..a605dfd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ # --- https://hub.docker.com/_/golang # --- https://github.com/microsoft/vscode-remote-try-go/blob/master/.devcontainer/Dockerfile ### ----------------------- -FROM golang:1.20.5-bullseye AS development +FROM golang:1.21.5-bullseye AS development # Avoid warnings by switching to noninteractive ENV DEBIAN_FRONTEND=noninteractive @@ -79,9 +79,9 @@ ENV LANG en_US.UTF-8 # https://github.com/darold/pgFormatter/releases RUN mkdir -p /tmp/pgFormatter \ && cd /tmp/pgFormatter \ 
- && wget https://github.com/darold/pgFormatter/archive/v5.3.tar.gz \ - && tar xzf v5.3.tar.gz \ - && cd pgFormatter-5.3 \ + && wget https://github.com/darold/pgFormatter/archive/v5.5.tar.gz \ + && tar xzf v5.5.tar.gz \ + && cd pgFormatter-5.5 \ && perl Makefile.PL \ && make && make install \ && rm -rf /tmp/pgFormatter @@ -91,8 +91,8 @@ RUN mkdir -p /tmp/pgFormatter \ RUN mkdir -p /tmp/gotestsum \ && cd /tmp/gotestsum \ && ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \ - && wget "https://github.com/gotestyourself/gotestsum/releases/download/v1.9.0/gotestsum_1.9.0_linux_${ARCH}.tar.gz" \ - && tar xzf "gotestsum_1.9.0_linux_${ARCH}.tar.gz" \ + && wget "https://github.com/gotestyourself/gotestsum/releases/download/v1.11.0/gotestsum_1.11.0_linux_${ARCH}.tar.gz" \ + && tar xzf "gotestsum_1.11.0_linux_${ARCH}.tar.gz" \ && cp gotestsum /usr/local/bin/gotestsum \ && rm -rf /tmp/gotestsum @@ -100,7 +100,7 @@ RUN mkdir -p /tmp/gotestsum \ # https://github.com/golangci/golangci-lint#binary # https://github.com/golangci/golangci-lint/releases RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh \ - | sh -s -- -b $(go env GOPATH)/bin v1.52.2 + | sh -s -- -b $(go env GOPATH)/bin v1.55.2 # go swagger: (this package should NOT be installed via go get) # https://github.com/go-swagger/go-swagger/releases diff --git a/README.md b/README.md index 6071f42..66056fd 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,7 @@ Do your engineers a favour by allowing them to write fast executing, parallel an - [Development setup](#development-setup) - [Development quickstart](#development-quickstart) - [Maintainers](#maintainers) + - [Previous maintainers](#previous-maintainers) - [License](#license) ## Background @@ -455,9 +456,13 @@ integresql ## Maintainers -- [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) - [Mario Ranftl - @majodev](https://github.com/majodev) +## Previous 
maintainers + +- [Anna Jankowska - @anjankow](https://github.com/anjankow) +- [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) + ## License -[MIT](LICENSE) © 2020 aaa – all about apps GmbH | Nick Müller | Mario Ranftl and the `IntegreSQL` project contributors +[MIT](LICENSE) © 2020-2024 aaa – all about apps GmbH | Nick Müller | Mario Ranftl and the `IntegreSQL` project contributors diff --git a/go.mod b/go.mod index b4e6479..0e7dfcb 100644 --- a/go.mod +++ b/go.mod @@ -3,29 +3,29 @@ module github.com/allaboutapps/integresql go 1.20 require ( - github.com/google/uuid v1.3.0 - github.com/labstack/echo/v4 v4.10.2 + github.com/google/uuid v1.5.0 + github.com/labstack/echo/v4 v4.11.4 github.com/lib/pq v1.10.9 - github.com/rs/zerolog v1.28.0 + github.com/rs/zerolog v1.31.0 github.com/stretchr/testify v1.8.4 - golang.org/x/sync v0.3.0 + golang.org/x/sync v0.6.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect - github.com/kr/pretty v0.2.1 // indirect - github.com/labstack/gommon v0.4.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/labstack/gommon v0.4.2 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/time v0.3.0 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text 
v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 3701fcb..18140f5 100644 --- a/go.sum +++ b/go.sum @@ -1,66 +1,63 @@ -github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/labstack/echo/v4 v4.10.2 h1:n1jAhnq/elIFTHr1EYpiYtyKgx4RW9ccVgkqByZaN2M= -github.com/labstack/echo/v4 v4.10.2/go.mod h1:OEyqf2//K1DFdE57vw2DRgWY0M7s65IVQO2FzvI4J5k= -github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= -github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.11.4 h1:vDZmA+qNeh1pd/cCkEicDMrjtrnMGQ1QFI9gWN1zGq8= +github.com/labstack/echo/v4 v4.11.4/go.mod h1:noh7EvLwqDsmh/X/HWKPUl1AjzJrhyptRyEbQJfxen8= +github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= -github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate 
v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.3.0 
h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tests/testclient/client.go b/tests/testclient/client.go index 503a241..0b4c171 100644 --- a/tests/testclient/client.go +++ b/tests/testclient/client.go @@ -129,9 +129,10 @@ func (c *Client) SetupTemplate(ctx context.Context, hash string, init func(conn return c.FinalizeTemplate(ctx, hash) } else if errors.Is(err, 
manager.ErrTemplateAlreadyInitialized) { return nil - } else { - return err } + + return err + } func (c *Client) SetupTemplateWithDBClient(ctx context.Context, hash string, init func(db *sql.DB) error) error { From a2d84e786b41509feb842667143f4e79596f4303 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Wed, 24 Jan 2024 12:22:49 +0100 Subject: [PATCH 146/160] add github actions multiarch build and publish --- .github/workflows/build-publish.yml | 53 +++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 .github/workflows/build-publish.yml diff --git a/.github/workflows/build-publish.yml b/.github/workflows/build-publish.yml new file mode 100644 index 0000000..febf3a2 --- /dev/null +++ b/.github/workflows/build-publish.yml @@ -0,0 +1,53 @@ +# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images +name: Build and push image + +on: + push: + tags: "**" + branches: "**" + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + +jobs: + + build-and-push-image: + + name: Build and push image + runs-on: ubuntu-latest + + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Log in to the Container registry + uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Build and Push Image + uses: docker/build-push-action@v3 + with: + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + platforms: linux/amd64,linux/arm64 + push: true \ No newline at end of 
file From f135ee06b3429fafcb1943b5f90b220477698dbb Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Wed, 24 Jan 2024 13:22:51 +0100 Subject: [PATCH 147/160] upgrade golang and go.mod deps, prepare changelog (WIP), rearrange readme --- CHANGELOG.md | 16 +++ Dockerfile | 4 +- README.md | 313 ++++++++++++++++++++++++++------------------------- go.mod | 6 +- go.sum | 12 +- 5 files changed, 184 insertions(+), 167 deletions(-) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..7de7872 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,16 @@ +# Changelog + +- All notable changes to this project will be documented in this file. +- The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). +- We try to follow [semantic versioning](https://semver.org/). +- All changes have a **git tag** available, are build and published to GitHub packages as a docker image. + +## Unreleased + +## v1.1.0 +- First of all, even though this is a **major refactor**, the clientside API is still the same. **There should be no breaking changes!** +- The main goal of this release is to bring IntegreSQL's performance on par with our previous Node.js implementation. Specifially we wanted to eliminate any long-running mutex locks and make the codebase more maintainable and easier to extend in the future. +- ... 
+ +## v1.0.0 +- Initial release May 2020 diff --git a/Dockerfile b/Dockerfile index 8823c0f..b0907ca 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ # --- https://hub.docker.com/_/golang # --- https://github.com/microsoft/vscode-remote-try-go/blob/master/.devcontainer/Dockerfile ### ----------------------- -FROM golang:1.21.5-bullseye AS development +FROM golang:1.21.6-bullseye AS development # Avoid warnings by switching to noninteractive ENV DEBIAN_FRONTEND=noninteractive @@ -128,7 +128,7 @@ RUN mkdir -p /tmp/watchexec \ RUN mkdir -p /tmp/yq \ && cd /tmp/yq \ && ARCH="$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)" \ - && wget "https://github.com/mikefarah/yq/releases/download/v4.30.5/yq_linux_${ARCH}.tar.gz" \ + && wget "https://github.com/mikefarah/yq/releases/download/v4.40.5/yq_linux_${ARCH}.tar.gz" \ && tar xzf "yq_linux_${ARCH}.tar.gz" \ && cp "yq_linux_${ARCH}" /usr/local/bin/yq \ && rm -rf /tmp/yq diff --git a/README.md b/README.md index 66056fd..a6e4387 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,16 @@ Do your engineers a favour by allowing them to write fast executing, parallel an [![](https://img.shields.io/docker/image-size/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![](https://img.shields.io/docker/pulls/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![Docker Cloud Build Status](https://img.shields.io/docker/cloud/build/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![](https://goreportcard.com/badge/github.com/allaboutapps/integresql)](https://goreportcard.com/report/github.com/allaboutapps/integresql) ![](https://github.com/allaboutapps/integresql/workflows/build/badge.svg?branch=master) - [IntegreSQL](#integresql) + - [Integrate by client lib](#integrate-by-client-lib) + - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls) + - [Demo](#demo) + - [Install](#install) + - [Install using 
Docker (preferred)](#install-using-docker-preferred) + - [Install locally](#install-locally) + - [Configuration](#configuration) + - [Usage](#usage) + - [Run using Docker (preferred)](#run-using-docker-preferred) + - [Run locally](#run-locally) - [Background](#background) - [Approach 0: Leaking database mutations for subsequent tests](#approach-0-leaking-database-mutations-for-subsequent-tests) - [Approach 1: Isolating by resetting](#approach-1-isolating-by-resetting) @@ -18,16 +28,6 @@ Do your engineers a favour by allowing them to write fast executing, parallel an - [Approach 3c benchmark 1: Baseline](#approach-3c-benchmark-1-baseline) - [Approach 3c benchmark 2: Small project](#approach-3c-benchmark-2-small-project) - [Final approach: IntegreSQL](#final-approach-integresql) - - [Integrate by client lib](#integrate-by-client-lib) - - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls) - - [Demo](#demo) - - [Install](#install) - - [Install using Docker (preferred)](#install-using-docker-preferred) - - [Install locally](#install-locally) - - [Configuration](#configuration) - - [Usage](#usage) - - [Run using Docker (preferred)](#run-using-docker-preferred) - - [Run locally](#run-locally) - [Contributing](#contributing) - [Development setup](#development-setup) - [Development quickstart](#development-quickstart) @@ -35,6 +35,151 @@ Do your engineers a favour by allowing them to write fast executing, parallel an - [Previous maintainers](#previous-maintainers) - [License](#license) +#### Integrate by client lib + +The flow above might look intimidating at first glance, but trust us, it's simple to integrate especially if there is already an client library available for your specific language. 
We currently have those: + +* Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) +* Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman) +* .NET: [IntegreSQL.EF](https://github.com/mcctomsk/IntegreSql.EF) by [Artur Drobinskiy - @Shaddix](https://github.com/Shaddix) +* JavaScript/TypeScript: [@devoxa/integresql-client](https://github.com/devoxa/integresql-client) by [Devoxa - @devoxa](https://github.com/devoxa) +* ... *Add your link here and make a PR* + +#### Integrate by RESTful JSON calls + +A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. + +#### Demo + +If you want to take a look on how we integrate IntegreSQL - 🤭 - please just try our [go-starter](https://github.com/allaboutapps/go-starter) project or take a look at our [test_database setup code](https://github.com/allaboutapps/go-starter/blob/master/internal/test/test_database.go). + +## Install + +### Install using Docker (preferred) + +A minimal Docker image containing a pre-built `IntegreSQL` executable is available at [Docker Hub](https://hub.docker.com/r/allaboutapps/integresql). + +```bash +docker pull allaboutapps/integresql +``` + +### Install locally + +Installing `IntegreSQL` locally requires a working [Go](https://golang.org/dl/) (1.14 or above) environment. 
Install the `IntegreSQL` executable to your Go bin folder: + +```bash +go get github.com/allaboutapps/integresql/cmd/server +``` + +## Configuration + +`IntegreSQL` requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). The following settings are available: + +| Description | Environment variable | Default | Required | +| ----------------------------------------------------------------- | ------------------------------------- | -------------------- | -------- | +| IntegreSQL: listen address (defaults to all if empty) | `INTEGRESQL_ADDRESS` | `""` | | +| IntegreSQL: port | `INTEGRESQL_PORT` | `5000` | | +| PostgreSQL: host | `INTEGRESQL_PGHOST`, `PGHOST` | `"127.0.0.1"` | Yes | +| PostgreSQL: port | `INTEGRESQL_PGPORT`, `PGPORT` | `5432` | | +| PostgreSQL: username | `INTEGRESQL_PGUSER`, `PGUSER`, `USER` | `"postgres"` | Yes | +| PostgreSQL: password | `INTEGRESQL_PGPASSWORD`, `PGPASSWORD` | `""` | Yes | +| PostgreSQL: database for manager | `INTEGRESQL_PGDATABASE` | `"postgres"` | | +| PostgreSQL: template database to use | `INTEGRESQL_ROOT_TEMPLATE` | `"template0"` | | +| Managed databases: prefix | `INTEGRESQL_DB_PREFIX` | `"integresql"` | | +| Managed *template* databases: prefix `integresql_template_` | `INTEGRESQL_TEMPLATE_DB_PREFIX` | `"template"` | | +| Managed *test* databases: prefix `integresql_test__` | `INTEGRESQL_TEST_DB_PREFIX` | `"test"` | | +| Managed *test* databases: username | `INTEGRESQL_TEST_PGUSER` | PostgreSQL: username | | +| Managed *test* databases: password | `INTEGRESQL_TEST_PGPASSWORD` | PostgreSQL: password | | +| Managed *test* databases: minimal test pool size | `INTEGRESQL_TEST_INITIAL_POOL_SIZE` | `10` | | +| Managed *test* databases: maximal test pool size | `INTEGRESQL_TEST_MAX_POOL_SIZE` | `500` | | + + +## Usage + +### Run using Docker (preferred) + +Simply start the `IntegreSQL` [Docker](https://docs.docker.com/install/) (19.03 or 
above) container, provide the required environment variables and expose the server port: + +```bash +docker run -d --name integresql -e INTEGRESQL_PORT=5000 -p 5000:5000 allaboutapps/integresql +``` + +`IntegreSQL` can also be included in your project via [Docker Compose](https://docs.docker.com/compose/install/) (1.25 or above): + +```yaml +version: "3.4" +services: + + # Your main service image + service: + depends_on: + - postgres + - integresql + environment: + PGDATABASE: &PGDATABASE "development" + PGUSER: &PGUSER "dbuser" + PGPASSWORD: &PGPASSWORD "9bed16f749d74a3c8bfbced18a7647f5" + PGHOST: &PGHOST "postgres" + PGPORT: &PGPORT "5432" + PGSSLMODE: &PGSSLMODE "disable" + + # optional: env for integresql client testing + # see https://github.com/allaboutapps/integresql-client-go + # INTEGRESQL_CLIENT_BASE_URL: "http://integresql:5000/api" + + # [...] additional main service setup + + integresql: + image: allaboutapps/integresql:1.0.0 + ports: + - "5000:5000" + depends_on: + - postgres + environment: + PGHOST: *PGHOST + PGUSER: *PGUSER + PGPASSWORD: *PGPASSWORD + + postgres: + image: postgres:12.2-alpine # should be the same version as used live + # ATTENTION + # fsync=off, synchronous_commit=off and full_page_writes=off + # gives us a major speed up during local development and testing (~30%), + # however you should NEVER use these settings in PRODUCTION unless + # you want to have CORRUPTED data. + # DO NOT COPY/PASTE THIS BLINDLY. + # YOU HAVE BEEN WARNED. 
+ # Apply some performance improvements to pg as these guarantees are not needed while running locally + command: "postgres -c 'shared_buffers=128MB' -c 'fsync=off' -c 'synchronous_commit=off' -c 'full_page_writes=off' -c 'max_connections=100' -c 'client_min_messages=warning'" + expose: + - "5432" + ports: + - "5432:5432" + environment: + POSTGRES_DB: *PGDATABASE + POSTGRES_USER: *PGUSER + POSTGRES_PASSWORD: *PGPASSWORD + volumes: + - pgvolume:/var/lib/postgresql/data + +volumes: + pgvolume: # declare a named volume to persist DB data +``` + +You may also refer to our [go-starter `docker-compose.yml`](https://github.com/allaboutapps/go-starter/blob/master/docker-compose.yml). + +### Run locally + +Running the `IntegreSQL` server locally requires configuration via exported environment variables (see below): + +```bash +export INTEGRESQL_PORT=5000 +export PGHOST=127.0.0.1 +export PGUSER=test +export PGPASSWORD=testpass +integresql +``` + ## Background We came a long way to realize that something just did not feel right with our PostgreSQL integration testing strategies. @@ -263,150 +408,6 @@ Our flow now finally changed to this: * ... * Subsequent 1..n test runners start/end in parallel and reuse the above logic -#### Integrate by client lib - -The flow above might look intimidating at first glance, but trust us, it's simple to integrate especially if there is already an client library available for your specific language. 
We currently have those: - -* Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) -* Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman) -* .NET: [IntegreSQL.EF](https://github.com/mcctomsk/IntegreSql.EF) by [Artur Drobinskiy - @Shaddix](https://github.com/Shaddix) -* JavaScript/TypeScript: [@devoxa/integresql-client](https://github.com/devoxa/integresql-client) by [Devoxa - @devoxa](https://github.com/devoxa) -* ... *Add your link here and make a PR* - -#### Integrate by RESTful JSON calls - -A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. - -#### Demo - -If you want to take a look on how we integrate IntegreSQL - 🤭 - please just try our [go-starter](https://github.com/allaboutapps/go-starter) project or take a look at our [testing setup code](https://github.com/allaboutapps/go-starter/blob/master/internal/test/testing.go). - -## Install - -### Install using Docker (preferred) - -A minimal Docker image containing a pre-built `IntegreSQL` executable is available at [Docker Hub](https://hub.docker.com/r/allaboutapps/integresql). - -```bash -docker pull allaboutapps/integresql -``` - -### Install locally - -Installing `IntegreSQL` locally requires a working [Go](https://golang.org/dl/) (1.14 or above) environment. 
Install the `IntegreSQL` executable to your Go bin folder: - -```bash -go get github.com/allaboutapps/integresql/cmd/server -``` - -## Configuration - -`IntegreSQL` requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). The following settings are available: - -| Description | Environment variable | Default | Required | -| ----------------------------------------------------------------- | ------------------------------------- | -------------------- | -------- | -| IntegreSQL: listen address (defaults to all if empty) | `INTEGRESQL_ADDRESS` | `""` | | -| IntegreSQL: port | `INTEGRESQL_PORT` | `5000` | | -| PostgreSQL: host | `INTEGRESQL_PGHOST`, `PGHOST` | `"127.0.0.1"` | Yes | -| PostgreSQL: port | `INTEGRESQL_PGPORT`, `PGPORT` | `5432` | | -| PostgreSQL: username | `INTEGRESQL_PGUSER`, `PGUSER`, `USER` | `"postgres"` | Yes | -| PostgreSQL: password | `INTEGRESQL_PGPASSWORD`, `PGPASSWORD` | `""` | Yes | -| PostgreSQL: database for manager | `INTEGRESQL_PGDATABASE` | `"postgres"` | | -| PostgreSQL: template database to use | `INTEGRESQL_ROOT_TEMPLATE` | `"template0"` | | -| Managed databases: prefix | `INTEGRESQL_DB_PREFIX` | `"integresql"` | | -| Managed *template* databases: prefix `integresql_template_` | `INTEGRESQL_TEMPLATE_DB_PREFIX` | `"template"` | | -| Managed *test* databases: prefix `integresql_test__` | `INTEGRESQL_TEST_DB_PREFIX` | `"test"` | | -| Managed *test* databases: username | `INTEGRESQL_TEST_PGUSER` | PostgreSQL: username | | -| Managed *test* databases: password | `INTEGRESQL_TEST_PGPASSWORD` | PostgreSQL: password | | -| Managed *test* databases: minimal test pool size | `INTEGRESQL_TEST_INITIAL_POOL_SIZE` | `10` | | -| Managed *test* databases: maximal test pool size | `INTEGRESQL_TEST_MAX_POOL_SIZE` | `500` | | - - -## Usage - -### Run using Docker (preferred) - -Simply start the `IntegreSQL` [Docker](https://docs.docker.com/install/) (19.03 or 
above) container, provide the required environment variables and expose the server port: - -```bash -docker run -d --name integresql -e INTEGRESQL_PORT=5000 -p 5000:5000 allaboutapps/integresql -``` - -`IntegreSQL` can also be included in your project via [Docker Compose](https://docs.docker.com/compose/install/) (1.25 or above): - -```yaml -version: "3.4" -services: - - # Your main service image - service: - depends_on: - - postgres - - integresql - environment: - PGDATABASE: &PGDATABASE "development" - PGUSER: &PGUSER "dbuser" - PGPASSWORD: &PGPASSWORD "9bed16f749d74a3c8bfbced18a7647f5" - PGHOST: &PGHOST "postgres" - PGPORT: &PGPORT "5432" - PGSSLMODE: &PGSSLMODE "disable" - - # optional: env for integresql client testing - # see https://github.com/allaboutapps/integresql-client-go - # INTEGRESQL_CLIENT_BASE_URL: "http://integresql:5000/api" - - # [...] additional main service setup - - integresql: - image: allaboutapps/integresql:1.0.0 - ports: - - "5000:5000" - depends_on: - - postgres - environment: - PGHOST: *PGHOST - PGUSER: *PGUSER - PGPASSWORD: *PGPASSWORD - - postgres: - image: postgres:12.2-alpine # should be the same version as used live - # ATTENTION - # fsync=off, synchronous_commit=off and full_page_writes=off - # gives us a major speed up during local development and testing (~30%), - # however you should NEVER use these settings in PRODUCTION unless - # you want to have CORRUPTED data. - # DO NOT COPY/PASTE THIS BLINDLY. - # YOU HAVE BEEN WARNED. 
- # Apply some performance improvements to pg as these guarantees are not needed while running locally - command: "postgres -c 'shared_buffers=128MB' -c 'fsync=off' -c 'synchronous_commit=off' -c 'full_page_writes=off' -c 'max_connections=100' -c 'client_min_messages=warning'" - expose: - - "5432" - ports: - - "5432:5432" - environment: - POSTGRES_DB: *PGDATABASE - POSTGRES_USER: *PGUSER - POSTGRES_PASSWORD: *PGPASSWORD - volumes: - - pgvolume:/var/lib/postgresql/data - -volumes: - pgvolume: # declare a named volume to persist DB data -``` - -You may also refer to our [go-starter `docker-compose.yml`](https://github.com/allaboutapps/go-starter/blob/master/docker-compose.yml). - -### Run locally - -Running the `IntegreSQL` server locally requires configuration via exported environment variables (see below): - -```bash -export INTEGRESQL_PORT=5000 -export PGHOST=127.0.0.1 -export PGUSER=test -export PGPASSWORD=testpass -integresql -``` ## Contributing @@ -458,9 +459,9 @@ integresql - [Mario Ranftl - @majodev](https://github.com/majodev) -## Previous maintainers +### Previous maintainers -- [Anna Jankowska - @anjankow](https://github.com/anjankow) +- [Anna - @anjankow](https://github.com/anjankow) - [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) ## License diff --git a/go.mod b/go.mod index 0e7dfcb..85a522b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/allaboutapps/integresql go 1.20 require ( - github.com/google/uuid v1.5.0 + github.com/google/uuid v1.6.0 github.com/labstack/echo/v4 v4.11.4 github.com/lib/pq v1.10.9 github.com/rs/zerolog v1.31.0 @@ -21,8 +21,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/net v0.19.0 // indirect + golang.org/x/crypto v0.18.0 // indirect + golang.org/x/net 
v0.20.0 // indirect golang.org/x/sys v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect diff --git a/go.sum b/go.sum index 18140f5..2c1be9a 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -41,10 +41,10 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/crypto v0.18.0 
h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 9e4ca7f14406b6464638ec5c17a87c4fecac891f Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 29 Jan 2024 18:08:10 +0100 Subject: [PATCH 148/160] WIP docs and changelog, generic timeout handling --- .dockerignore | 3 +- CHANGELOG.md | 120 +++++++++++++- README.md | 249 +++++++++++++++++++--------- docs/benchmark_v1_1_0.png | Bin 0 -> 382521 bytes internal/api/server_config.go | 13 +- internal/api/templates/templates.go | 28 +--- internal/router/router.go | 6 + pkg/manager/manager.go | 4 + pkg/manager/manager_config.go | 7 +- 9 files changed, 322 insertions(+), 108 deletions(-) create mode 100644 docs/benchmark_v1_1_0.png diff --git a/.dockerignore b/.dockerignore index 21c5356..e3c57a2 100644 --- a/.dockerignore +++ b/.dockerignore @@ -5,4 +5,5 @@ .tools-versions Dockerfile docker-compose.* -docker-helper.sh \ No newline at end of file +docker-helper.sh +docs diff --git a/CHANGELOG.md b/CHANGELOG.md index 7de7872..4dc5b42 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog +- [Changelog](#changelog) + - [Structure](#structure) + - [Unreleased](#unreleased) + - [v1.1.0](#v110) + - [General](#general) + - [Known issues](#known-issues) + - [Added](#added) + - [Changed](#changed) + - [Environment Variables](#environment-variables) + - [Manager/Pool-related](#managerpool-related) + - [Server-related](#server-related) + - [v1.0.0](#v100) + + +## Structure + - All notable changes to this project will be 
documented in this file. - The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - We try to follow [semantic versioning](https://semver.org/). @@ -8,9 +24,107 @@ ## Unreleased ## v1.1.0 -- First of all, even though this is a **major refactor**, the clientside API is still the same. **There should be no breaking changes!** -- The main goal of this release is to bring IntegreSQL's performance on par with our previous Node.js implementation. Specifially we wanted to eliminate any long-running mutex locks and make the codebase more maintainable and easier to extend in the future. -- ... + +> Special thanks to [Anna - @anjankow](https://github.com/anjankow) for her contributions to this release! + +### General +- Major refactor of the pool manager, while the API should still be backwards-compatible. There should not be any breaking changes when it comes to using the client libraries. +- The main goal of this release is to bring IntegreSQL's performance on par with our previous native Node.js implementation. + - Specifically we wanted to eliminate some long-running mutex locks (especially when the pool hits the configured pool limit) and make the codebase more maintainable and easier to extend in the future. + - While the above should be already visible in CI-environment, the subjective performance gain while developing locally could be even bigger. + +### Known issues +- We still have no mechanism to limit the global (cross-pool) number of test-databases. + - This is especially problematic if you have many pools running at the same time. + - This could lead to situations where the pool manager is unable to create a new test-database because the limit (e.g. disk size) is reached even though some pools/test-databases will probably never be used again. + - This is a **known issue** and will be addressed in a future release. +- OpenAPI/Swagger API documentation is still missing, we are working on it. 
+ +### Added +- GitHub Packages + - Going forward, images are built via GitHub Actions and published to GitHub packages. +- ARM Docker images + - Arm64 is now supported (Apple Silicon M1/M2/M3), we publish a multi-arch image (`linux/amd64,linux/arm64`). +- We added the `POST /api/v1/templates/:hash/tests/:id/recreate` endpoint to the API. + - You can use it to express that you are no longer using this database and it can be recreated and returned to the pool. + - Using this endpoint means you want to break out of our FIFO (first in, first out) recreating queue and get your test-database recreated as soon as possible. + - Explicitly calling recreate is **optional** of course! +- Minor: Added woodpecker/drone setup (internal allaboutapps CI/CD) + +### Changed +- Redesigned Database Pool Logic and Template Management + - Reimplemented pool and template logic, separated DB template management from test DB pool, and added per pool workers for preparing test DBs in the background. +- Soft-deprecated the `DELETE /api/v1/templates/:hash/tests/:id` endpoint in favor of `POST /api/v1/templates/:hash/tests/:id/unlock`. + - We did a bad job describing the previous functionality of this endpoint: It's really only deleting the lock, so the exact same test-database can be used again. + - The new `POST /api/v1/templates/:hash/tests/:id/recreate` vs. `POST /api/v1/templates/:hash/tests/:id/unlock` endpoint naming is way more explicit in what it does. + - Closes [#13](https://github.com/allaboutapps/integresql/issues/13) +- Logging and Debugging Improvements + - Introduced zerolog for better logging in the pool and manager modules. Debug statements were refined, and unnecessary print debugging was disabled. + +### Environment Variables + +There have been quite a few additions and changes, thus we have the in-depth details here. 
+ +#### Manager/Pool-related + +- Added `INTEGRESQL_TEST_MAX_POOL_SIZE`: + - Maximal pool size that won't be exceeded + - Defaults to "your number of CPU cores 4 times" [`runtime.NumCPU()*4`](https://pkg.go.dev/runtime#NumCPU) + - Previous default was `500` (hardcoded) + - This might be a **significant change** for some usecases, please adjust accordingly. The pooling recreation logic is now much faster, there is typically no need to have such a high limit of test-databases **per pool**! +- Added `INTEGRESQL_TEST_INITIAL_POOL_SIZE`: + - Initial number of ready DBs prepared in background. The pool is configured to always try to have this number of ready DBs available (it actively tries to recreate databases within the pool in a FIFO manner). + - Defaults to [`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU) + - Previous default was `10` (hardcoded) +- Added `INTEGRESQL_POOL_MAX_PARALLEL_TASKS`: + - Maximal number of pool tasks running in parallel. Must be a number greater than or equal to 1. + - Defaults to [`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU) +- Added `INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MIN_MS`: + - Minimal time to wait after a test db recreate has failed (e.g. as client is still connected). Subsequent retries multiply this value until the maximum (below) is reached. + - Defaults to `250`ms +- Added `INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MAX_MS`: + - The maximum possible sleep time between recreation retries (e.g. 3 seconds), see above. + - Defaults to `3000`ms +- Added `INTEGRESQL_TEST_DB_MINIMAL_LIFETIME_MS`: + - After a test-database transitions from `ready` to `dirty`, always block auto-recreation (FIFO) for this duration (except when `POST /api/v1/templates/:hash/tests/:id/recreate` was called manually). 
+ - Defaults to `250`ms +- Added `INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS`: + - Internal time to wait for a template-database to transition into the 'finalized' state + - Defaults to `60000`ms (1 minute, same as `INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS`) +- Added `INTEGRESQL_TEST_DB_GET_TIMEOUT_MS`: + - Internal time to wait for a ready database (requested via `/api/v1/templates/:hash/tests`) + - Defaults to `60000`ms (1 minute, same as `INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS`) + - Previous default `10` (was hardcoded) + +#### Server-related + +- Added `INTEGRESQL_DEBUG_ENDPOINTS` + - Enables [pprof debug endpoints](https://golang.org/pkg/net/http/pprof/) under `/debug/*` + - Defaults to `false` +- Added `INTEGRESQL_ECHO_DEBUG` + - Enables [echo framework debug mode](https://echo.labstack.com/docs/customization) + - Defaults to `false` +- Added middlewares, all default to `true` + - `INTEGRESQL_ECHO_ENABLE_CORS_MIDDLEWARE`: [enables CORS](https://echo.labstack.com/docs/middleware/cors) + - `INTEGRESQL_ECHO_ENABLE_LOGGER_MIDDLEWARE`: [enables logger](https://echo.labstack.com/docs/middleware/logger) + - `INTEGRESQL_ECHO_ENABLE_RECOVER_MIDDLEWARE`: [enables recover](https://echo.labstack.com/docs/middleware/recover) + - `INTEGRESQL_ECHO_ENABLE_REQUEST_ID_MIDDLEWARE`: [sets request_id to context](https://echo.labstack.com/docs/middleware/request-id) + - `INTEGRESQL_ECHO_ENABLE_TRAILING_SLASH_MIDDLEWARE`: [auto-adds trailing slash](https://echo.labstack.com/docs/middleware/trailing-slash) + - `INTEGRESQL_ECHO_ENABLE_REQUEST_TIMEOUT_MIDDLEWARE`: [enables timeout middleware](https://echo.labstack.com/docs/middleware/timeout) +- Added `INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS` + - Generic timeout handling for most endpoints. 
+ - Defaults to `60000`ms (1 minute, same as `INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS` and `INTEGRESQL_TEST_DB_GET_TIMEOUT_MS`) +- Added `INTEGRESQL_LOGGER_LEVEL` + - Defaults to `info` +- Added `INTEGRESQL_LOGGER_REQUEST_LEVEL` + - Defaults to `info` +- Added the following logging settings, which all default to `false` + - `INTEGRESQL_LOGGER_LOG_REQUEST_BODY`: Should the request-log include the body? + - `INTEGRESQL_LOGGER_LOG_REQUEST_HEADER`: Should the request-log include headers? + - `INTEGRESQL_LOGGER_LOG_REQUEST_QUERY`: Should the request-log include the query? + - `INTEGRESQL_LOGGER_LOG_RESPONSE_BODY`: Should the request-log include the response body? + - `INTEGRESQL_LOGGER_LOG_RESPONSE_HEADER`: Should the request-log include the response header? + - `INTEGRESQL_LOGGER_PRETTY_PRINT_CONSOLE`: Should the console logger pretty-print the log (instead of json)? ## v1.0.0 - Initial release May 2020 diff --git a/README.md b/README.md index a6e4387..f09cdd5 100644 --- a/README.md +++ b/README.md @@ -35,32 +35,14 @@ Do your engineers a favour by allowing them to write fast executing, parallel an - [Previous maintainers](#previous-maintainers) - [License](#license) -#### Integrate by client lib - -The flow above might look intimidating at first glance, but trust us, it's simple to integrate especially if there is already an client library available for your specific language. 
We currently have those: - -* Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) -* Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman) -* .NET: [IntegreSQL.EF](https://github.com/mcctomsk/IntegreSql.EF) by [Artur Drobinskiy - @Shaddix](https://github.com/Shaddix) -* JavaScript/TypeScript: [@devoxa/integresql-client](https://github.com/devoxa/integresql-client) by [Devoxa - @devoxa](https://github.com/devoxa) -* ... *Add your link here and make a PR* - -#### Integrate by RESTful JSON calls - -A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. - -#### Demo - -If you want to take a look on how we integrate IntegreSQL - 🤭 - please just try our [go-starter](https://github.com/allaboutapps/go-starter) project or take a look at our [test_database setup code](https://github.com/allaboutapps/go-starter/blob/master/internal/test/test_database.go). - ## Install ### Install using Docker (preferred) -A minimal Docker image containing a pre-built `IntegreSQL` executable is available at [Docker Hub](https://hub.docker.com/r/allaboutapps/integresql). +A minimal Docker image containing a pre-built `IntegreSQL` executable is available at [Github Packages](https://github.com/allaboutapps/integresql/releases). 
```bash -docker pull allaboutapps/integresql +docker pull ghcr.io/allaboutapps/integresql ``` ### Install locally @@ -71,29 +53,6 @@ Installing `IntegreSQL` locally requires a working [Go](https://golang.org/dl/) go get github.com/allaboutapps/integresql/cmd/server ``` -## Configuration - -`IntegreSQL` requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). The following settings are available: - -| Description | Environment variable | Default | Required | -| ----------------------------------------------------------------- | ------------------------------------- | -------------------- | -------- | -| IntegreSQL: listen address (defaults to all if empty) | `INTEGRESQL_ADDRESS` | `""` | | -| IntegreSQL: port | `INTEGRESQL_PORT` | `5000` | | -| PostgreSQL: host | `INTEGRESQL_PGHOST`, `PGHOST` | `"127.0.0.1"` | Yes | -| PostgreSQL: port | `INTEGRESQL_PGPORT`, `PGPORT` | `5432` | | -| PostgreSQL: username | `INTEGRESQL_PGUSER`, `PGUSER`, `USER` | `"postgres"` | Yes | -| PostgreSQL: password | `INTEGRESQL_PGPASSWORD`, `PGPASSWORD` | `""` | Yes | -| PostgreSQL: database for manager | `INTEGRESQL_PGDATABASE` | `"postgres"` | | -| PostgreSQL: template database to use | `INTEGRESQL_ROOT_TEMPLATE` | `"template0"` | | -| Managed databases: prefix | `INTEGRESQL_DB_PREFIX` | `"integresql"` | | -| Managed *template* databases: prefix `integresql_template_` | `INTEGRESQL_TEMPLATE_DB_PREFIX` | `"template"` | | -| Managed *test* databases: prefix `integresql_test__` | `INTEGRESQL_TEST_DB_PREFIX` | `"test"` | | -| Managed *test* databases: username | `INTEGRESQL_TEST_PGUSER` | PostgreSQL: username | | -| Managed *test* databases: password | `INTEGRESQL_TEST_PGPASSWORD` | PostgreSQL: password | | -| Managed *test* databases: minimal test pool size | `INTEGRESQL_TEST_INITIAL_POOL_SIZE` | `10` | | -| Managed *test* databases: maximal test pool size | `INTEGRESQL_TEST_MAX_POOL_SIZE` | `500` | | - 
- ## Usage ### Run using Docker (preferred) @@ -180,6 +139,97 @@ export PGPASSWORD=testpass integresql ``` +## Configuration + +`IntegreSQL` requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). The following settings are available: + +| Description | Environment variable | Default | Required | +| ----------------------------------------------------------------- | ------------------------------------- | -------------------- | -------- | +| IntegreSQL: listen address (defaults to all if empty) | `INTEGRESQL_ADDRESS` | `""` | | +| IntegreSQL: port | `INTEGRESQL_PORT` | `5000` | | +| PostgreSQL: host | `INTEGRESQL_PGHOST`, `PGHOST` | `"127.0.0.1"` | Yes | +| PostgreSQL: port | `INTEGRESQL_PGPORT`, `PGPORT` | `5432` | | +| PostgreSQL: username | `INTEGRESQL_PGUSER`, `PGUSER`, `USER` | `"postgres"` | Yes | +| PostgreSQL: password | `INTEGRESQL_PGPASSWORD`, `PGPASSWORD` | `""` | Yes | +| PostgreSQL: database for manager | `INTEGRESQL_PGDATABASE` | `"postgres"` | | +| PostgreSQL: template database to use | `INTEGRESQL_ROOT_TEMPLATE` | `"template0"` | | +| Managed databases: prefix | `INTEGRESQL_DB_PREFIX` | `"integresql"` | | +| Managed *template* databases: prefix `integresql_template_` | `INTEGRESQL_TEMPLATE_DB_PREFIX` | `"template"` | | +| Managed *test* databases: prefix `integresql_test__` | `INTEGRESQL_TEST_DB_PREFIX` | `"test"` | | +| Managed *test* databases: username | `INTEGRESQL_TEST_PGUSER` | PostgreSQL: username | | +| Managed *test* databases: password | `INTEGRESQL_TEST_PGPASSWORD` | PostgreSQL: password | | +| Managed *test* databases: minimal test pool size | `INTEGRESQL_TEST_INITIAL_POOL_SIZE` | `10` | | +| Managed *test* databases: maximal test pool size | `INTEGRESQL_TEST_MAX_POOL_SIZE` | `500` | | + + +## Integrate + +IntegreSQL is a RESTful JSON API distributed as Docker image or go cli. 
It's language agnostic and manages multiple [PostgreSQL templates](https://supabase.io/blog/2020/07/09/postgresql-templates/) and their separate pool of test databases for your tests. It keeps the pool of test databases warm (as it's running in the background) and is fit for parallel test execution with multiple test runners / processes. + +You will typically want to integrate by a client lib (see below), but you can also integrate by RESTful JSON calls directly. The flow is introduced below. + +### Integrate by RESTful JSON calls + +Your development/testing flow should look like this: + +* **Start IntegreSQL** and leave it running **in the background** (your PostgreSQL template and test database pool will always be warm) +* ... +* You trigger your test command. 1..n test runners/processes start in parallel +* **Once** per test runner/process: + * Get migrations/fixtures files `hash` over all related database files + * `InitializeTemplate: POST /templates`: attempt to create a new PostgreSQL template database identifying through the above hash `payload: {"hash": "string"}` + * `StatusOK: 200` + * Truncate + * Apply all migrations + * Seed all fixtures + * `FinalizeTemplate: PUT /api/v1/templates/:hash` + * If you encountered any template setup errors call `DiscardTemplate: DELETE /api/v1/templates/:hash` + * `StatusLocked: 423` + * Some other process has already recreated a PostgreSQL template database for this `hash` (or is currently doing it), you can just consider the template ready at this point. 
+ * `StatusServiceUnavailable: 503` + * Typically happens if IntegreSQL cannot communicate with PostgreSQL, fail the test runner process +* **Before each** test `GetTestDatabase: GET /api/v1/templates/:hash/tests` + * Blocks until the template database is finalized (via `FinalizeTemplate`) + * `StatusOK: 200` + * You get a fully isolated PostgreSQL database from our already migrated/seeded template database to use within your test + * `StatusNotFound: 404` + * Well, seems like someone forgot to call `InitializeTemplate` or it errored out. + * `StatusGone: 410` + * There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. + * `StatusServiceUnavailable: 503` + * Well, typically a PostgreSQL connectivity problem +* Utilizing the isolated PostgreSQL test database received from IntegreSQL for each (parallel) test: + * **Run your test code** +* **After each** test **optional**: + * `RecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreate` + * Recreates the test DB according to the template and returns it back to the pool. + * **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible. + * This is useful if you have parallel testing with a mix of very long and super short tests. Our auto–FIFO recreation handling might block there. + * `ReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock` (previously and soft-deprecated `DELETE /api/v1/templates/:hash/tests/:id`) + * Returns the given test DB directly to the pool, without cleaning (recreating it). + * **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes. 
+ + - This is useful if you are sure you did not do any changes to the database and thus want to skip the recreation process by returning it to the pool directly. + +* 1..n test runners end +* ... +* Subsequent 1..n test runners start/end in parallel and reuse the above logic + +A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. + +### Integrate by client lib + +The flow above might look intimidating at first glance, but trust us, it's simple to integrate especially if there is already a client library available for your specific language. We currently have those: + +* Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) +* Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman) +* .NET: [IntegreSQL.EF](https://github.com/mcctomsk/IntegreSql.EF) by [Artur Drobinskiy - @Shaddix](https://github.com/Shaddix) +* JavaScript/TypeScript: [@devoxa/integresql-client](https://github.com/devoxa/integresql-client) by [Devoxa - @devoxa](https://github.com/devoxa) +* ... *Add your link here and make a PR* + +#### Demo + +If you want to take a look at how we integrate IntegreSQL - 🤭 - please just try our [go-starter](https://github.com/allaboutapps/go-starter) project or take a look at our [test_database setup code](https://github.com/allaboutapps/go-starter/blob/master/internal/test/test_database.go). + +## Background + +We came a long way to realize that something just did not feel right with our PostgreSQL integration testing strategies. 
@@ -370,43 +420,86 @@ We realized that having the above pool logic directly within the test runner is As we switched to Go as our primary backend engineering language, we needed to rewrite the above logic anyways and decided to provide a safe and language agnostic way to utilize this testing strategy with PostgreSQL. -IntegreSQL is a RESTful JSON API distributed as Docker image or go cli. It's language agnostic and manages multiple [PostgreSQL templates](https://supabase.io/blog/2020/07/09/postgresql-templates/) and their separate pool of test databases for your tests. It keeps the pool of test databases warm (as it's running in the background) and is fit for parallel test execution with multiple test runners / processes. +This is how `IntegreSQL` was born. -Our flow now finally changed to this: +## Benchmarks -* **Start IntegreSQL** and leave it running **in the background** (your PostgreSQL template and test database pool will always be warm) -* ... -* 1..n test runners start in parallel -* Once per test runner process - * Get migrations/fixtures files `hash` over all related database files - * `InitializeTemplate: POST /templates`: attempt to create a new PostgreSQL template database identifying though the above hash `payload: {"hash": "string"}` - * `StatusOK: 200` - * Truncate - * Apply all migrations - * Seed all fixtures - * `FinalizeTemplate: PUT /templates/{hash}` - * If you encountered any template setup errors call `DiscardTemplate: DELETE /templates/{hash}` - * `StatusLocked: 423` - * Some other process has already recreated a PostgreSQL template database for this `hash` (or is currently doing it), you can just consider the template ready at this point. 
- * `StatusServiceUnavailable: 503` - * Typically happens if IntegreSQL cannot communicate with PostgreSQL, fail the test runner process -* **Before each** test `GetTestDatabase: GET /templates/{hash}/tests` - * Blocks until the template database is finalized (via `FinalizeTemplate`) - * `StatusOK: 200` - * You get a fully isolated PostgreSQL database from our already migrated/seeded template database to use within your test - * `StatusNotFound: 404` - * Well, seems like someone forgot to call `InitializeTemplate` or it errored out. - * `StatusGone: 410` - * There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. - * `StatusServiceUnavailable: 503` - * Well, typically a PostgreSQL connectivity problem -* Utilizing the isolated PostgreSQL test database received from IntegreSQL for each (parallel) test: - * **Run your test code** -* **After each** test optional: `ReturnTestDatabase: DELETE /templates/{hash}/tests/{test-database-id}` - * Marks the test database that it can be wiped early on pool limit overflow (or reused if `true` is submitted) -* 1..n test runners end -* ... -* Subsequent 1..n test runners start/end in parallel and reuse the above logic +### Benchmark v1.1.0 vs v1.0.0 + +We focused on improving the pool manager performance in v1.1.0, especially when it comes to locking and thus request latency. 
+ +![benchmark comparison v1.1.0](docs/benchmark_v1_1_0.png) + +The main goal was to bring IntegreSQL's performance on par with our previous native Node.js implementation, which we also benchmarked: + +```bash + +# Previous Node.js implementation +--- ----------------------------------- --- + replicas switched: 563 avg=14ms min=6ms max=316ms + replicas awaited: 1 prebuffer=8 avg=301ms max=301ms + background replicas: 571 avg=-ms min=-ms max=1180ms + - warm up: 32% 4041ms + * drop/cache check: 4% 561ms + * migrate/cache reuse: 25% 3177ms + * fixtures: 2% 302ms + * special: 0% 0ms + * create pool: 0% 1ms + - switching: 67% 8294ms + * disconnect: 1% 139ms + * switch slave: 4% 591ms + - resolve next: 2% 290ms + - await next: 2% 301ms + * reinitialize: 61% 7563ms + strategy related time: 12335ms + vs total executed time: 11% 106184ms +--- --------------------------------- --- +Done in 106.60s. + +# IntegreSQL v1.1.0 (next version) +--- ----------------------------------- --- + replicas switched: 563 avg=70ms min=58ms max=603ms + replicas awaited: 1 prebuffer=8 avg=72ms max=72ms + background replicas: 571 avg=58ms min=49ms max=520ms + - warm up: 9% 4101ms + * drop/cache check: 0% 1ms + * migrate/cache reuse: 8% 3520ms + * fixtures: 0% 296ms + * special: 0% 0ms + * create pool: 0% 284ms + - switching: 90% 39865ms + * disconnect: 0% 120ms + * switch replica: 0% 261ms (563x min=0ms q25=0ms q50=0ms q75=1ms q95=1ms max=72ms) + - resolve next: 0% 189ms + - await next: 0% 72ms + * reinitialize: 89% 39478ms (563x min=58ms q25=66ms q50=68ms q75=71ms q95=80ms max=531ms) + strategy related time: 43966ms + vs total executed time: 40% 109052ms +--- --------------------------------- --- +Done in 109.45s. 
+ +# IntegreSQL v1.0.0 (previous version) +--- ----------------------------------- --- + replicas switched: 563 avg=131ms min=9ms max=2019ms + replicas awaited: 94 prebuffer=8 avg=590ms max=1997ms + background replicas: 571 avg=1292ms min=52ms max=3817ms + - warm up: 7% 6144ms + * drop/cache check: 0% 0ms + * migrate/cache reuse: 4% 3587ms + * fixtures: 0% 298ms + * special: 0% 0ms + * create pool: 2% 2259ms + - switching: 92% 73837ms + * disconnect: 0% 112ms + * switch replica: 64% 51552ms (563x min=0ms q25=0ms q50=0ms q75=1ms q95=773ms max=1997ms) + - resolve next: 5% 3922ms + - await next: 69% 55474ms + * reinitialize: 27% 22169ms (563x min=9ms q25=12ms q50=15ms q75=19ms q95=187ms max=1201ms) + strategy related time: 79981ms + vs total executed time: 51% 153889ms +--- --------------------------------- --- +Done in 154.29s. +``` ## Contributing diff --git a/docs/benchmark_v1_1_0.png b/docs/benchmark_v1_1_0.png new file mode 100644 index 0000000000000000000000000000000000000000..ab4cb061d5b6f04d5e02ec853ba136b6ff6241dd GIT binary patch literal 382521 zcma%iby!sGw>C&fDoU4tD4<9UrPKg}(lFAEbc4izbccj=cX#(tq5{$#0@68jH+*}1 z{hjlk@B98Zb6vyinH|sG&vUP}?sc!lXQ-SwJ`M#A3JMCoq=c9P3JMOejYfin3H zPhJcK1y9Hf0)a|GAhb|B8)Gv|BNP;g&#|ia)fBr)(liz5GSLYl9=i~V<)BAA7R9A) z!Qv5pOdCUnCerbR^<{(OBUDQG%d9EXN_t0-yE&+}*AH z8rWnYRTC2>j8Ne7v-{)dw5lm=Vffe|zDUaHMZr<-;d$|7pn<~E8TG`)#ZgSr*4t*9 zD2FJgmBkw;z*l$0X4rntZYU9^L8Iu-*IGdjQM^`?NTB_J zyZRdPxp?KZ?B_EkwP3!$25T_0XESsLH3x8x~b0y4foQz?T3D^CqD-6>3n$o z;~l~M5c{X}N^Tl(rUYCLExHY*_d>i+^xcCX-1KQc=fItD(n4Nsoci?yLCAkl1w_ zY}*nj)#Hq@E}W6v_R5!cQM1k77y6W(Z)YWl=i<}JX^Ki7PJCN`aTsXd&WhWy?T1RO zHnDMAh}ng~)!a;SS>eRcRz4zmj`5q(v=VvxPa>!03}pbn;k z4>e-@=%6f5vGw-Oz>Br7410HH*CoajAm(_y04XQzhI_FMwuzRe`hQdjo*eP%Anfk zMA7MB>u8H1eCz(cwRP?XQ!8bV&X$w)91Y4j)!yD7_&ODr|G`(Chv(-smjl157V;tZ zUiN|KAT#f%@2mIkxk;OTW$Z^uK*9ZhyS{vH)|+?#z6dRznD?g$Jch5^oQr(%J}*9C zCg28(yimhb@x`e?wZxohXRUb1@^yzPwxU>1Fot(<0n*?cK&^G5rW5}5V9at z{a`h^2SK>sMBO53pWXZXHY!r-8QLRykRc}M;fuE_w8TFdaxi~|P>IStU5|8J;W;GO 
ze5XU74#iUzhd~X-D5$?wL3t>?bY?FP<2isdf(FFnGgeKx;e@5)@>!(ATc-Cf@9lg? z$qU%jGftrI{PtRnD9$I>K*5qk#V1y8T#b4HeeX!Q1Z}HbXh|%^o6}aj!3T1Ho)RwK zX|fn{faAum6JEcddJy=D?KVu9p6n47mJT|r9v+=9yFXNhiQe)#)w7gPU$( z`e_2eh3W*EzNd1GtPExJy9{Zxb%Zp8R}#;nP`kZXAi+-$BrbCFM>Iy{Mr=oj6&N?s z)Wa;L9O>>yEB@5Cd(;@(?CTcl=6@Y}otPzAnf2*?7Sp{Rt{#OR5-Gyf$GOpmiA8T~ zGcTdbh3aFCV|M1$CtN2_PQsFsk{XlpHA0gltiEV8PCu!&H-9)TRAf;+Q*1xhRoFWg zUtpsM&h*f{!4G>^op&*+V0uyWyoO@>#WdRVeLmk^vGoy*<6p;8E`m2WH@TQ^zf1X+ za)@L@vtU;#6Wt|_hc zt-+W+s^zsvAMxn>N=AbBz;`gAYwFwU{*5s@#MH(|Se1hm%yF}3^B7^i1xGL-@rj~ z6cYNp(;`fs{48N3@jTfuu>|zSc*LrQ{e+u1u?!?=eVJ0&YshJzu-ZprT%H)IUXa*l z8LgiD%fz1Y2>s~k(eJf0mI9JjAr2wRB>f~Uad~msEGB9b8grFdYLz9R(h0THS{EBD zgH*$oukdzw@4NW`3q?~!3;V%1<1G{ZUv<-`Nz}t%jN+~82X?Cj&G(Gc&2-H?yHQ~u zoe18P*qhS4BU^hTBT(WpB`XdSuQTvAC|!my3g%?^k($F;sZ~9y*`XB_AMTQwQRBQ2F#e&V_5fF z>;4v;&E19V{(chor0|LI6JB+`64MF&i8t~-CSL27gY;=MEwe3$EvL>mt2-O1J1yUv zEMoeZ77JLipIjwe?Yr?@IGlsr72SC+9nTXl4z7!Co?K|2<6hTaf(>RWtEc#Xv7x1) z<)Ge2o3n3z&H0A$Ugg(_uTfv?Xm>LFGJ-Six39I6x7W6}&{@-sLgf4&i++d12hOcf z*k8<2Slc9sPxV#AeLUH3*bm+pe{}k2Bq(oqW20i2S$saECYVT^N6eeadaS45YM5s@ zI{RT4fkBqROM|q|FF~n~TzS4c;^nXwpKUt#I?p|DBD5ql#P0u0@p<{N#p8KK42Dt( z1R1AWUmpi)G=B8k?ywg*F+XBTU$`6ic9uPvc{5<8bm&S);P zB0HNdB_S*IJMz+>w4-?KB-eJdXvF1$>Xzab*SqaQY=-6ry%wEY+-`L1(-!9Mj0Q&U zobq&y)s4f0=!*zidT{!uBTj_)U%F~Kddevb>}uIroA1(9*1gu81$7&aj7jc1zAsss zV^B4@G9K-kyEsznEHQW9-04PrPCx&$I`-zNtJ;DlRDEAhYANPMu{JL_zfP_?z0P#H z`rE>{`Ml-Pi_v70g#HBn;^al0*=(T=zrrGN5KN0+mI9&CMj(S^gW0B$b%s8QHI-5Pq zOho|vU0q(bKvuJD>FTtRMVpeLidH#Z!K}8CQf&d^hOZsYymd1P)!^*7q-&r^Z%M#cmHO4z12yUbzMrqv6vD^ki}$R4 zT5g!iSX+XL919w|n>H8p^1sh~NuVwiC~|e!bFyKQRVTc8&Vzojvov|x+Hv;rECQZte>I-!!=Ju4 z&?w%ZYNvbT@e3{c{-NsANYbXOT& z9f9TWH(VVaRSxp&O6a)lzPMG~8Z3&hV|#eLdZ%i!moPV=I-+`0PTI!tsd}e$Yi6x= zqxJrt-MqVp?qU1F_tWr_a1-!zcb8M=9@3Hc!*WO60sLDEA1wP|#^n{T%ioY6O~>X<|I&wUR) zyEi78^sN8fSwwe<(^>&3~C6Iq6=B1bypQOLvd*7T4Hf{n1|EQo= zB?&Ta37ks>G&*Ib&#kQ=nAGSNBZ&T)Nw4R?2s^MZ1g5yCJ;IvLWQFMdnc0(P!rHlM 
zcJ$*X&xI3<%eN)o{4?hnlIR7|T#`u#M?S4TOlEV)hT<6{IX!fS;?F~qiG0i1$Z&Wh z;VT$_?vn!Y_3i^NgMlIXJ#uU)CdlU-<$qQVjZRqGh<568O$R!YJqwADdf-1Zs}zI+ zi^9Rq#*`G%HK)zNdH%?~FoNAYg6KFGQ?v;B*xkJyS|R$4?!{jeNCP9xY*En-k=%>% zu9HY|6a8l%5RzVSDZ$luMO)HdAXVL8k#_$)uy8aMl+i4TYk3{R{qtahBu56DY?SeJ za|YW1IqwThU0rq}%P5C#+&}lp72tK(ZY|UK6AK;mI0MfR-JkbA0K^pag7*kl5SoQx zL)SE3xm7**2ap8>sEB98kKf@(`LvD+OAt#vy;Pvn8>4-(A&u{+l=5 zFBs#15$Y!2h-R<=3v3n1b@3NrW6>zESeKqKrO9{{qq5d2`qjU{VtAAxH}cG^a}f9P zak_1^zOe4U(S%&ysb`x}q<`+y6c6>z_jf{QVHq20kCwZUc+X#8$vzcM&5;}lO(#8m zF0{Rgcm5{5z*X#Y?urSxIS9DTn-C^ zaH2)!emCD=e2N12{2z$lkp8zhY;55M_z*vSAnE-N-qe5*W^~rEyz2-u@M_+UtPuX4 z2l;zr%7_=d3fj7WfnD}|iTP__Nk{|xm&u~K{~Jx`$mNaBk=FThpBWtJ*AE>HK=0ns zMns|*(b6+Di~RLp>;%B|F?~`cG-bR%llqI%u)p~92k`kn5I-FMZ*v$%dI;ucR8jwU z2!aY=gu0f2NB6Aggvm`qa=ID+&O;h{LGhTPbq>50FtC$Y>KA_vjO#OCVE-~%w99{^ zX&q=%Lsbj&XC+|n zfPo>H`4#>enCWA{!2WHr$GQJP)54ZCE2O>BpespU=pu*ciG3eOV4@46P#QZ#7$LF{ z$~nM(Ht*>fHT9V>ZHmfi*7ii=+*9FVLenNOkjA5dxg}dXoM%)X>^@m509GQ?{j}&` zCQhRDQG|0?j1X4Va?nY+HY`rFb3M^rQaxbay6?~CpiQ|M{t$dUwB5AVy0%k4FXyA7 z=TF?{vQ^4E^1=DZJpwXRz(@prGCmRAt9=H|!JBp6Ygt0kx5ri zqV@rmv-?>zUgHz^{$Vd$a(IrtV1x+?K8dxiGI43#k0UR7b#F(;ZSp;MPud#g+$j;u6>=xk?%xMfad!Lx3*haZzUNYyWI%;tKGK^NHzQ2h?Ma1W?^S7tGM|+-kP0PF%V99lF ziWGsn>!Y<_Ch0p;W@$pcW%n;oz46cwCnQ1WpW`tJg>@-j&0?uTbe{@aeT>+U_VuTv zvuJHy0TI2bj$Ua#8sMD2oTz_lSlLWVQV^t{Zez)xR+jxx5I*iKh=KiSIK1#oHo2*R$&w1XtA62K{^tl~`=yHLk zY_g(X@yu9O%T1ilx!P3U-Cj9)1j<=AEMivg!nYnY=3+BZ+B6>^7s7R?HvY{{&1k@E z)cbcT2H)u(s&0saw?c?}c>Z`S_K)|Rw_dh9ZoXJcPVQxGvJ4_Uk@Ro%g$UfPe`Bei z)t}I?C@s_Um=D3*&ri07vx1X?3WO-NuCT^R=Q(W>o7U4@aVCqwXN`l2+>=_P+rT7i z-z4sgMqREn;O`V(O3>>_CDX2Nl{F0rT&zSJ0~9RuKB79J_VrgKMcHxU@Hrb$$yVUj zq|-axCt992C)(<8V3t35Iz3txPed+lPiS%lLL9}tFue~mQUBN|;&_Xc_fi3zh!5&O zh9|O!&f@acBr=n3TIm1*M9I<)(_MW##>0yXadRWry=0TyWP%g5x zaET0-ox&h2i$@N>S4(1Vj#(Q4j}$HRiA9EB#(ls2KE&|^5t#X*t&LM7LQG3LFf&8i z_oxbcr8GK*8oW3jf^w>;$h6YpJ{VmZC5)v=zjFPnwcj_bN|h|Nbm@qLJCRr zxRMQV?-e4=g$p9`mVI_%E&~sv)+Ic2eKv7VmWN17|MPP=!-7Poq728|q6bryO*?hT 
zZ;tvncLoREU)SI$usH#6OHfwTh6TkljN7skQ9adRMMk0xT<>OBY>im=M{PpPk=dzA zgMPq=MLRaxQ$yMx;K(UpjkO@*&D;xxzE+m|9b2hLVBa{1K0T3!;GF9bQ+0aNhIHKp z*7a`Sd@l2w_40%_ z7d(M81~Y)TASu?5XA$*fkEMDU)7|t*G%YI)l^2KHwP%dtwWI1>rrlksAtKT~1{B`# zGbF~0C~(X%SgnZpfbf>PxO$0#<%hVGwUWn2u`$XjX-X+?T%)L744`y8uq?`4$Y|vx z!*MJm1z!r&+N9<@lzbYD132-QuTpRr@ZAeDpf6LN9p`@gR`_AW&Ar=*sQ{H5rXMg% zVNt?iD9wgHcQbf*E&sDA(YBMk-!NL1q?xxM#Mk^M-pjU|Al{nbUR<{34-BaeBOYS9 z8dSa@qA~N(>A0^jX||S{M7;p=S?SzV@WBtlcW7s2O{;O5p?H|K1YW|P0EO7T?Ub`5 zAQJ!r)rZ%Rc4AenVJ+v?*!EC8CbAz2h__aRwScR2{C3^`(Bs1c{_To^Jsn0G&+AI{ z58fDk)q_Gat5yv=WTSP|1LhxxiPil*kDFT6^oX7z6E0z0z<`SV9y^`=cA^}A@6uR; zDMf~Jn)OrgQGsil$s8!&nCa7?;F-eK6*ht{$-q;A))6Z$c)EEPkai8jE8qu{YI8{pZWwR~IBIl|6h>yNihd?TjY zlk@AkVkmyCx1o>f@-nIyTA?bsmSFIOF2F}s%hq4rEH~CgkfiwxfJy?uXkLF}8|8!37cgw}?fYyb*11R$QmYQQaBj0X`JS5yk80C7eZwY(`8Ebn zu55;c=I8xz&+Fxg&yTDdioh*E6iwW6*sabyYKD2#>g7l;f8n=gYG9fS-msbQ`(bKe z7)I&&hxa`Gsi}&cyLvT$=b-?`{%v1=vqaj)K3$p_U!0VgF?WZc=~m!>K&{DDtO|B_JAGm+p2>mJN;FLC48IcgfbC@ zRKFUo+?6OrZ}!xCT*_-+l)I1`?x-rO8P(*wxRY-M4pcg9 z$n98<_@mFLr`RAOwfw8Ijw|6kZ%`wsR0bq*N zlAF$b`b=sn^OAElxzDv0SIC=WaK$ILv|3$Pi+j$1;9HbJIzp0;^8Zf_mA*mufk4A) zk_w9>=XbQ!S2`V$W+S)4u3L$cPVy(!c}+f)YLez05bLWVq`1BbFs!p5L!14)^D=4g zmrdrJ*SP_PSKrUudVtUAa=cMiQN!2WexD|VOiA10VpaR>*dRNM2cgBk9LfoDax;b1 zWrqvFk4oemXRPXTP(Su@bUiDcW(wo%=TfY?ZsrRb&zM0nwX{TJIDY7P{=LlNw9BdO zO4~JwYTRo?>Py+Ea0y>%X|mg4ceF#H!E#kED<^>OrbkU(G@b&Bx3T4J9VHdtaNbET zgc}a27bm}}9aG?$USaY82$k(*Ex>jLO!uc?!1&r#!M{uE=aTwX#^0CZkV?(l^|F}M zO`8|zRQI_Lr1cUk$a`K!-y~9XIA!{iAmDj4liE(H2dk~^n3N9Wjd|oa*46zyYqR!) 
z+`PXX6u+sNrC5|T_Qkhd8y(e_yZ8TO2lX3q5Glc9geI+8FV@}_PuN=^hMr8lCDk@8YdM{kYf+q^;V|eVBmZroY&FggL6Zbu{Td8a0S)`1j<;#$ zy9{hs52k621V1m5Ki@rTyK89#I7~FVl4sl~qD_NXZQjMof=eq9UUyHWWxH<1no}Np zQ{xOUmZ*=%@+mbM4S0`RFUQOCChGYf8ooZ*5-cibPAl1645X2p#%%Q@r@1`Q+Y&!9 zY11hI;>p=>lNN_B;R1(27vtp~u^U|gtBM^tHX9sPG6$ zY210xu zH(tw5a-^&Y;Jne@hIJk#s8p*MNG&h}8z7v@*8+EdZ37_NFW=JKm48WPO|0!dYy%NF zi555>f~2khu6foN_`?OhyBG|+%-qg-KaSdZ*0PrfP>tGURYv-YQ#fk(Ib{Chs_y*9 zakH`TywAM|{*ij|8wi5YE;Y2(IL&!&Ah+r&k`MK6J!jx1&1g(ni-i8opypaI1x}HT z3XJqbzR7y4w4P_=DuP+M6G!vp2#8x_OgD&0#Z>63_#Wy>q4_GLZEtF?3vaU-@nIKd zo`)t}@HFAg>6F3F;7WmG#~1$HDOmq)L}fn)sn&;@kkFBW%i@uO)1b=(RUPNm9Nk}+ z^87F76y*iEyR#APtTqR_fHR5)`)@Wy*5zOCU(X}vmEBeBpiLd6M@9IYy|1dD?K}@7 z-)JrfQBP;hijFE}b6Qn(?`RzjK3H0Y!zR?bN?I=wySXb`0UjXQYPokN00Wz?Sbjhs zYfnC(7}<VbNw-@X3orYYF zS$kSfAR;X$+!L8Xfq=KB(Oy-6S4`=LJO{0hASM8MN@%j35(OY8~xIHobH@zAf}6W)pDEvdotde5~oMP3NYF&a|geQ6uX`m z@6a6R@=Gm^1Xg()L_J_u!0fZ%r*?p@J5GN!=<>DC@@8{ViLi1tum=z4__ zEQv{TbDzsV87-ofBbkEDT*6Pw^vW2<(@ z^}#zk+f?ud6+nDm>2rSrQo?syrz`cuaKw zQp&u=!Io$Bn9N3BH=6`bIzY#hoqN5Is)$H`%nWX(_%)@6%1K8hB+(=!lSsmg7bQFg z4^CIHGAMG&+B$ZJrbk}ly5-loluOp!@DLr}Up*;R()FJY%yctd^2J+T?KwYXK)qR? z|C5E*n%3rVo%zySe|=T)_1-%|1#QM`Z@~I0>6{Gy%Ov#Dpn(*7X;YyTYQY% zY@`Yi(P}O$8rdGb*Ar3sEk-1`CwyMTc3DwSH+tme^Dh@0OCYnME(ggSGL9B#FUpTi zpwY=CRpdCu>>0k!`4q6_JMVF|6NdPmNi7#6e82R(>l?bQK; zZABX&7~?g4&A68II&Si2*QrgK#6@CMnH;}oYDBk2WD>$$+*3YtH^eM>xz-CE^AHdD zQr<1BcEiy!9d6AxmTnIyBY24so0F2qp?HxeaxliXaJ@|}185PAAH%1(*t*Cs+LvZe zBri_CFFDyFIctZ(Qa1wuphMGk+^(X!Z_;cr{P1h#5(&QP1?mB4+(cvB5EzvqERzMw zUAzV8FP9b6L>^STj*s3_f;rjtPU()BO|kgArDboD_0sUen9G&0tXL!pi1FR6EXh&)o? 
zdMGhg7q@QKr>N`=3D;|P?2Q(;+!dE)!DINJyeL~Yg5r05=*QRNv%27az7dGMo8@5lmz$~ZI*}4&`gcNHnrmf%(A|y ztaTy<N_Lo%d zHpeXEys+1#mqI7sp%uvRHK803EkAMP+^ba}#H5Wy{cmCrs#vkFxqc1z z+KZ92p2y*xSYx`$+~GVjD+WGz%NP+B3VZXT`rT2Uw=HNl78B;;5Ye-$b=wAEN#cnm|4LhMMSL6#& zYaX`0#?53p9+?v6dX|F{;jR`bh{$2@jPK!id5@GPxk0@3w75p=?~2znZij}xkMlh9jP(DTmgofo$atvl;W#ZagS|t_?6(~< zG6V&fKn6=|$W*|WtzvnaYV#u<0O!maaiVZ0yfpOJb{zOEra;Z_Xce>vjWzJtFe&A{*L)=RjadmSuj%~*pK(X4`nWbL2v%0<)e^UQ4p7%bXTYR_R7;um zZ}vRfw&0lQHA&f9H59dVTvc+KB4)J_5z+=lTju-?(JzC?E+Fun3T~{T*wdW)Ta=0s zat$+q;>-_B1JJn}3#s_xwo?z&52JIGyG*I0UPI@AW|pG5i`2RbULosQJdOSUQl+FsLt%K75PBlP^Pp z{w7@{!J*}5jKwvvH?Fa~ZQA7ycqUA*sR(x{xX)(onG1fz#@2y4S*?qVIXMceq4_YE zRQP!EpE#9^K)72~0JdrPmXf=yltTxMk70D3`+TbQaVno1R{{kB50|_#*Z8uUGV`6Y zDS_&oD>;riW;w-GQf@)JSU02L`R3s4c7A3?-O><3bUI4I7p`gDxJsZ+XpuYro8dce zCn0X}eZKf!&^|Mo7e3F_!sz{!yXk3Rsxg8IFK^6^8|7!JnpU-j$tdfUD_lxKH#HZ7 z`;_+xnBsw2DJ!posITw|lD=fNb7^dW*>+gY z7j#!zPknYsQ)F-ET5vQ7ZmZ@CR3TAGWj9QrO>8KJF!jBXvu&6`yYekj|5o8~UE!dO zrd}puI$igDw+AihS2@efwM#C-;gGShr$7WV2$@BO3B9>Q^L~rCZf!R%C@T<<3CAd zEw-sMLJzIRnc6am`z^_oK}6T5^=(!R2_Jk6ncuoM0qLBB;#xg#Sa@qbsBefDfy|&Z zu+=b?=+-HVVt$7cAyf+$+e{K+u>^NdCjkQkxc5`rKowyiZP7@T<=dK+jva`W7y4BK zbfu%f~|Db{p=7eN@#O>=FX zEIyJ0u7thq!7fGds+IhO8P1Iyr~Vj@js~?(r$Q20Im_~mmHzKDC#LNh3!};W-jVL- zacWu8G|w%%-|xZR)aH(p;J=Z5T+J^b88<4+s~une%Q(5;6xQ>KwLGQw8W|X3Y}+nJ zYh5b9pJfdo6Aen3=8m}Yue6km9|)Z<(CLitDkMb9@~A&$>u$GVoVou@eiFJ5Xmb4# z#Fxi{;}auN@s}viJjrpdI;$L;8TZbUIXri9 zPJoj9`}x%TaW>lxAdXB8#lxj5+BVNli~z+i$(yT-${w$~5%cwWiv2*@7Qzo|;7`;Sk5?`Z zK2ZLy7?vc!%B9(pda)TmOi?r8&OJ0nZt!L^=`b%K6&xb#+hCrcq3U?wW_sfLh%AS_ z_D`tD+4zQv?=-v$4}-W3Ji}9yn~rH}kr6ub&CfPO_d^@$~@+6SE%}Wo(_o4Guf(69klrUp=Y?A7j(o~eulhu>6WvEsnDOKGEl3uU6?19M^f1Yuv zV2EsT|8YoV&geg_nCj)4(tJ8?(T!L(=SA!R#R}<8Qa9h%wVO(VPN{7b-mc}7&WR_R zAeaI!ujyOYNy)QqsRpxm>W6GR6r>Tj?(tlm1Ry)+G0^5u3Vx^z5V0Ax( zT-8G~RP}>^ju@P7JbA%t_@l0q!XiPMwJ~S8mL|YQqvf6RA%9^^5OjHLuLfDj3N!Nb z5}6@uq_&ibHvV47DaUy@*3D3d5Pp~o=2C2!?7_gH0aUOUWD8V0C10jU#^vAI8!Rq= 
zyqgE8%^UhBFXCZvzi+2;AQ8d$I`W^8+eCywKK}CBEkXIRSZ&i#eU(o-G2ir3{Ieue+yiky*egQ{dzM1AqVlHWES^LBT&RkA z0Dl$yd=QF1_ymGUURP#tj6@^Ix=S7JtQl{cDoky-#-MeBv~%sA z!tri%=P4*ZcIi6k7T-1U`^3I!&Fgk9nvuA@oeD&-6t$_57Dbz42-U{Ap8>V?=Y(`O z>^CmncPUr=7SlZxO|Pomnl2WaUn3KvC5{6_LM)&SE<*qA(5v-097d=w`cE?74v1)E zI01ajHB#Mx7{}-5XWV;UojHpsFeA#9Ff<^`SMMkx!0?PdP zcCSl?MF(wxIvrCuk{_}eiuwo(VgdZH1(3IvhvL}aWh|v!@4wAQiNiomkJF4NEC{-s zKpMFCbKU8P^4qws+u%0!`vNtO>k&W76!V=-mHF_Kn0LzT`G+CC@5^*u%e>!{LyJ6~ zfTC({1NbkgY6O1)^jTomKyWWmg{t1yzKF(RT+nkI9pAzH>nI$m{hpb@Z{=0COsU5@ zpOth@oxfHjpQmZBB(Iw*YqkWwi)k*g6StJBZr36rr1F?IN7}3yU1|-4=a8xr5@GxD z0kY<(jjTCNxdnt?*7oPM5m#bX0g8vqmD+m2=jtTkt)`b`I6YY1^Qm9+s{3qgOilc2 zv|kam;q7xfx5HXRgqhsU^d3xxkNJ5Fg0^0=o<*jtv)I#p4=y>dxVO{8TrTi~U_eMW zFBDSjkV}5@!sBGKD0~3MfsX-k9}ng?bRdIUZ*9WQkU24P5yI^?BN~KU*PM8tE4#g9 zr*Lrz=eOz|T%|`vTh|jbdyF5lZBH>d6m06O{U;oN#`2;Lisq7mT8%5HZiKm917Eec zO8V5gNhUyBV^KDG>ad~2?m2Q9pC?fTzS#dm#SlXRmmAr*>r<)Y z1;5KCMAcAx?^J6Nwx&bJYdd5EKm<&lxPSaUOTp!kxR&a{D3U&zs?oa^P2iY8f zjeYNgHliL6$QlcX*$jo(9`2e##hDGyQ|F7K)d79MWk1ZuEgLt5nrRchagBwKd9JBo zM81zzdc7I>ulH!b`#4lH!J0al5pCO0l%rR@}814Jv*LcLi;taU)FM zDano9fWEZ{42?467FktGcBv{|r>f*NXUHU|8d#d?^llF1I>nWPejbDw%(k9EwX2v9 z@xZgK2!Kk1U)Yd8k#3eC0ezXZtnLpGh{_e@UdhyB6#!~7-C`i}5M$WeEB)RQ`S`-< zp8CrcWg)HnR(DyMQ8E54LufJa`s`;Xc)5pBm|4Ss_H}!U(5$n{oA`^=7e~}&2uddS z$;Y42YSnqa>2}1?M{S_oTo*sl)D@e1rL{2|Z#H+p-9nkeHp1Li&OnsjvL+WWRtu;d z4Qx%@Ri}|<2BzCm%l<{UYO3%4IPE6483DUj$jV2~CmDfX=m4k*bEet^08ktJ_DcXd zu_Y=!o&N!~88h6lV}WwOOl1&ow5hLh)41_2j_NIYXCla1%K*g{EW91~2t*`fv7Ot` z>cp>0qB|sVP>hHJ~CPy1rO-g9u9HjG$lzEej%qqryZg};ul=`7#F?h zKMewJu1?B5t&jy+nM4PC44c2*aE^lUD&Xq0v2NDytdkcf$*<6!B=v)Vj>U%d6`?cL z@592EK+V!3rmIBGdDt&XKrVGjs8;XDry_Cge zsezVVH!W_?YOg+5vY{v&C})4BNj{W2?!4ON)KkykAOldPRL$??IGrQ&Ltpb$N}v?( zq!dBz{}+yj#5$f{JRo0DZinvIx80Pg16`gz>OkWFC5UL7o4h@}Ap_l*gXqUpgsuaHy&`}EVy-&%IbR+|Jc{(QnK zw~gP66gUBwuXn1|!}VF0pKTpKPy}b|rq`cAx{!&9geI%B>q{b)Y3}hYZjlmIc+A7Q z+mBY?YtDQAKvc@c^l_uZ-a7csdGZ-QIlCFZmXp6^DQ02pcW#|!zS6Qh7WX4~39|Jd 
zn=9_*!K+c|ew?QDW{IWE8}s#Cpkc84cAKviC>R?bZwzZsiU#;h_oy?Itix_ypAViV zg_2Eb+O!xxZ*M*@U-ED2cm4^5x`O~oPphE00g$WnN<%Aqp)QaMsiw8?{Je{07e{1; z;1{IRgb=9H$;OyB3uLk#O!06S9_M~m2#aR~q*r~Wf`Jg<2Ydz0@gVDwsAKgMO!DzY z1hQ3e;ExziTY)JYXs_`es#&5DpHF9yz6ry_r(mt0wUeY6O&j$prKl~F=DfzqRUHH> z@OZG7Tfczxl)G9vRijMt{_lIZuNHKq1|R`GnV7}y>|AKNzC*jgujnA{HLGioRD7Te z<}{^uA801*nlgk@%L|^2(WTOPe1F5$JG?jb#Cn0Z=C{}5r7bVnl)5hxGf#&vhdIV1 zH#lu??17%-7wC9EO7fTNMLcCRmbdC_aO~7JjUy@W#C5!rFc4cyX}lH0PZP zv?foz0@@=}Giuy-fimg%g1~Nec0E)A9ljGlPW+IZqcOn8jOs=iD-eBK3wl>*rTMGZ z+;bx}llns$9P!e%as2>jTG`N86_9osX7+&mfWX8n7uh`Pr2^gUf(A#4(A^Cp}(g$*W#!Qd=8iawkd6N?n&SY+q!98nS6MS~+r3 z%>bYG{71u(yQhC=@wF`_rEq~@p{qu&LiB^0tgQqNgDA=5TV7o_*J_O7bcPQzj(I(^ zrwP`TuxYNIlbvv)S+(sK!|kf=L#xprmH#_}UWE4bLCLR@tusl+R6|b^Aq@zVm3;9R zJlI-6_0S$c&7+2y@inubQ|mzUpKn0Y`~2(t#)wB+8u?bH!di=$@ANZ?F$^eiiqIr{ zoV-rdPHsC^CI72RY_CQC^2OTXiPj?8GgIQV)iyQq1G=Y%`ar#}%Ias=>*h*g?`pzgFzv=r*?hl7EG_`u zD?&fcD^+u*g(L`{myAv*FDV4z;;RLGQx?nWK!Y<D=j5 zP-_0Vt+@I1Ps49XpECzFrMZ3Ab7kZD^^2QG>1ND*a-)&!I}N4rMD{pyDVfLJ%^Bm( zxdw-<`vqOT>#k#+`ix>`#kB?9o1Z_`qMh>+^n<@Rg}pf^G`Nmk=1&WJ^P*ZvJ5O%f z&|)dMlVBu;`*zfQzF^4H_?ZD%eN}7CsZ;*{(DoKkRc>v&FeQy3hzLlhbSo((ohl%? 
zkZurZShPrYNh2Z+N=XP9NVha19fBYoOTd3VEZFDl_xt{F&iP{u$KL8%IG<zt{v)M&p_BK!t;r$iAiFmV?b_3x=ASz?ZXW|PW z0IbEW&ztLW51;>7p-6GcoZJL8^6A(^)sAskf8!FLfaep?KIaEOlnl!I&gEx>*j2hmf@Y3bh+03e_c*xIpFS*vG= zzWW(MQTP%i@m$B})T#Jn;x{G1B+;wEQ}*dr8dc%O?(l7k%-V`A0(BXER^=DNFBb?y zECF83AU8>4$Rej$ibY0qy^H7iqsME(hn57z4Dvz85Y@wRdNYuzX?ghD# zrbq4rt!S#H8U+pO0!KMo*foqFXfPfzPs&bl}#N(suiHY5`E4Kn}WvJ0fHhs+Tg zjC=Fx4VzJnS-S><%2sPrVg@_Ski2N8^v1O^JL4%%&-D|Vs=~AUg9vTI;cQF?nV_4w z0s4NGT}~l^U9$M6oX?D(VY{4Tz6((yXEdUG;#i{nMTw|`M2K|P}zgyP-QDtpE8ntr@qTVL;vLKPXCuJqppFpKG>n2u8TrFQamuaHo61h%>j=%K?3Od&qmhZ{2lOiykc2?YlSh?)K&563_i`&=dT_tceIp!+K&B^+f(}=0G z^jbab)292|q9?^ydET3rae!i(DOM6X!A?Y`i%w9-Sxzu!nmNy`qx`MOHrXe^7R=1# zD>Oil0H`m>&@BF(rc}_}l$+#Bn$q&BEz5gzqNXY5gw+rcvVK$V zzHWlaqkCy)@rsL03({@)o;ufuYYp7I?|-d`cK6$ACV3K&P_4-Cpi#VHP5aedDFRa# z0GT?p_3cOMOBEHNN5HhM$xJr*A<_~l#l$}Ie5ja|`P6=82ZSdcCac^bfz24l1gO~6 zQIc5=;g}I4X9t;or?-Dk&8+>^ycfH^XFqZym_%Ldsk87f;Q}Rn>kqHGlw9MNGe#qZ zcK0H?W#MYYG~>P!Wj9I{(LuWqH+drF(IH4?*4gOJ?w2p!eujIm0&F+V2Iz{hIXbi$ zf#&;o@vg0Oc-fuxw2ZYhzQ`&$F1=Iz)6_Ezq+0;1^lsyxThHia(h@VqPk`$#WB&$J-`!mv3miIRf6%f{N2kaNndlT zRsQX%d%$4{eK%^4%}sBq%+p3qm`j?8-ecL>3vX}Iv4BFnYU@M?HPMaggO%94Mw1WP z)Wfa&0xY^}0)w*F@4b!s*;Xo>`ORCLd{yG6wnj<_slx)R(SYg%Choe2&+d`&v}#U zEEmr!CRm$ZYa;PUj%vaVUCUXcdHqr{zYYxUO6;a;hX^Sf@{rRF76ozOiq2aoX+dfh`!cfX2783Z1F-T%W_*+JE zW_fOo_WCzTwN>vDo<}-y*Zg5K)pLJ>1{=zygvnR_sJYU7HCs4q}5@07; zRDLzhF?T=q^NcPgv^7r@C_w0jhV9c#nkXKg!yf|Um>(X=kvIGv6W<*@*3ubU%+tu! zEQw+F9hs|ch`dKza->qh`BM#U~2edymlza8YD|5Xp&AC-9+8L7pyMwo#(DLe3 z!FBU+xHTHoZs#9|bU$3a4!Z^b&F`$8y$fmVA(-*K)1T_1y0-z+n5SCnZMpK_;r6ly zg9@)QM-Sq)6fF;DJweSjc=5`c8bi=DsO~&b%C2p>1M9F)ZTVuLiyQm zIv7KS*A~W$! 
z+=HCEbs;(j6LZl}1AkaC{XjqlL{{F4jbk0w&S{7O88wnovsUflr8o#kTsdW%>mK)5 zp+F0B*TJytFaY0LW)!GT#9_V}ukQU9%n$_Wx(wb= z$WGhS(4OqN)flPSY=0)a3?XUa?_p+v>EVCP3tvp1&z}Qn1iPI!YR0Zo9PfY58Q~d5 zeUeGRR$TXjLT*%jwGcCo0;QJM_B1&1{V#ED^2B3Mj40X^AZ=L8S&>4bOaapse zVR8WMctksd0;OYNc(8Eg7jD*ORhs!q@@)dKU<=xYekZ8T@b5MsD%@7<(8&-q=12QM zK`b4=Yhq@TSHOv;c&fhtbtVm%nzR6MD}}SDz2u~qOG`_x7ItY%b)H3kN%ML+(X1m@ z#zCo{VafFu+LYU;6EkEmpQn|T?@r641j~dpYIoPu9geI?=u2q`JZy3V1ja2-gQL%E z6zQ67_VGN>cj^2{SMe}YN8i7mUDBeAJ7nl3)0x02r?)Y`vfdUt1kG2%s2tS-;aWng z$LBkGl$-i<%UbN{*3Pe{)W+a7>-&bWcq8n^7OamU02mxJ{CmJCYRNb0zB|`B8fl6) z&oPR)IILS5qkNV$1mr76n0%uZ-uU8{zZ*-GQkYDimxW2CRWDSuSuRj*t^%xgOQgNo z;|HxEAi`y^mrpR7vlSe!>JuqhxIYzIUfIPa&hpV+%$o(+fM{L!Y1hW`kRRUVSntJ` zj0iJdOeWCzS&vocR~puE^v3u#{X)3B6tVtRZ0Pg--A!*pIS0LwQP<7zoy~nbuDYBR zM)5JnRk4_iZbV<=@l};6iLqtDZtqS+P5VPN;jfVAIS84~IczO-pZb&*--8FFv{BtJ|}BLg&RZ z>90Buq2gJRU4~CC_tQEd!AE=fMe$Jy+~>H(!e2$sB>^nGrG4{5m^a&_scZ#A+kf+@ z;ysMYg`KYH$gv+DZ-OVfTK?AJwDYaab5iZM_q?fPf}ZM%27djTnNJa}KqZh?@7~Fj z;xb(xo@;FNM~|Qt3bhS?5&&!$lR{^a4%W-fk}AcjAJS@>qGpHOCN>#ot8Ua=ewTxw>pW?wclOOT4l=bk3Du1`tZFUCH-TB z?Dl9HKMoRU%KwT+@V;gaUeELfMHyG|%|j;lX_ez^c~H8~Hc`=X)<7=qwHJFUcye-$ zg-}&stWOg(i+;xPR2p@#pNCM*=-y>LtXJ34omheh+U455Qm~7HIYV!+k=ouh`2nH1 z(c|Az&-3~28Kgfh!LwOGY2lXroeFUnU^-StR}{ravab=Y>>2H~x18__&1h&WJ@OIy z;jxhQX zi>MpMM<5=6?n?6Vfhb>j-8^p9kPhf;6e0eN1r~&Cqv{vfBt$X^{S@ z{-iD52vGPhsocitT>ltz#*}N}0cwc9bef$WszGsQHi9x@zHW2^R;%3YSqG&S88jfh z50lE9IeKuS7a&V;n>^(9%;cH@ zdJv(+-Je@W5XyhQbnycKz`2%pYN)lP129yT2xg6REN0ZHK-XKf?wta`9iS^Fb5{YD z!uOQ?nM2y}MA1Yr@D1uQC%0$f3u=AdW~OcKiV(UYimD76o&yX#LLQW@54kN&0wo2Z zW;tk9jvPk|FQ;aaE3LO`pLS1sjT6YuJRZs4Qyo2c-~6z>k@?-K=qBabv2V7=d|0c7 z>d!;N!XJfFgd-2Z5Z?hE)t;BpjIF&c$HJYKswo24zQBm5jQ+ZwAK#Iq7v=GH4cL*k5wAL8(w(&h9l@lF2!Gy$f!Y`i zemtq@X|u!gvNHv8#rkr5a(1ziZ=jJFGy9r)Kc+;?_H)eIYB|u{wdTkV*R$3P9q#3J zym9E`LL-uQ(D-EQO`b>YT6e*~m61uE z3&^WqkG3qSxAmo6A1?WO$I)A9?v3z%yRDch$eak5Reul5Toq;{N8fJ8640F%Y*UK( z(nJladC6*QO=(AOnk~kjx7|qeYq2}RG<3M>*|&RMY|#BJ$@weCIoH>Hrl?2Z_Yi`{ 
z=kKPYDzOIn;fxmWd&aNYDgMT(7);|w(frITI76-BgtPo*dolDEqD<+K6;Hz1mwKFH z>7)Al+S54R%-@gRnE98CU|E8p_Hy^s6C1Gs3i>~;#5K7iI(fhSe>^*@%j->WGq?$6 zgHH+*X3X<@-)NZ{Fi3lID^|CMH8+r85{HhUuj=;mmqc2YZmY9of_()123k;+?jnURp zMA?WnaP?{o^hGDBjIiny)S1!cH47|=-Z3~dnA)>C$ariss~6SHmuc~=$kY(QZaA1c zx)628=%wZ&uJV#Rg1Z(P=K8D}wZ>sCi5b1g(Nrno;%tu64}cy!R8=>ntKM&j0}?Ja z)RrN=5g!{{hI}n{l)}DZFaLWe_Y5ZYr?qbuuUkpVJh2+^t@A3uqy+pgbRd}MXPohX zukSa%&0PBOwdBvFw*1p|b*c>rf=C>)*5|pd@8ClbssYzibA7BjP#E+rCx0yy@vWGw zn19WYk)v`^f4gf{(|I7AlHx-W$NnAAl(4{g6`Res_gEk?6@;)0wQj0!5>ep2O$NRN8yPeAy=q2&_ArK#m2u6B`O z#kJ3s1V3eAle9}aJE6^q?Ue*{x!m@#&Yqro`}q$-0+-AaVvD6Et6m>~u{kwov(-XU zJA*qJxjE}8yLA|5TZL-k?#W<47A9t9-ceUX>w+yQ37ATR9?=d6K{RHH87G%CX7>S& z8GKQzRpYHhy{)>^?Pu9t4WYTl-qpbw7hjnt$x-`nn7}T0<<1#bvOP;o2M#>W4u~dS znSk-NUrvB!E68M)1!7w{ZoAVwsQ5y=$`kRk;Nkn*n;wf8|IwJyz+~g1jlW>uvl51w zy);qqudFka7~RhAfh%Fjd@|De41g+N_QPM&2QY~JS8BTpo~*ABf7JrNBOD(Bp$LOX zagkd9VaomNBPy1QuPN)e6z^67sN#M%aGNmCbb)|{769`5^JE!3)W70IN%F#wTMAc@ zbXO>7PXM3RXMzDaJ9+C3WI1oo<>@MIz_?J`Fz$~W2dVq?gL-UtR1$=8fDQrexHQt6l=*(vB`d-k( zbG@fRRP5p_tbp+?p3hq79>-q(F&`KIi&f>90_ik25JNuXDKBLpW;%`pt|$4#cY%`j zNc{Qrt=rDYNu zJ1OHWKr|PWf_fW7vF9XZw3*<^T!ge50ZZZt;{y)y#CrAmC#;WE{5)_kxX^9@$}RIa zJNdh-%A$(~+FXMvl?t^CxAyX>oVnOj&M5_N=zjjGI19#s`2iBP4jLyX_G2%vdsV;F z#h$P`X5`)^>Ia*?!gRP}sLeosZQtuAqPWXcb0m8&+~Wr^gTn+bR(X^66?gK&&? 
zzznDYvvSHMW!joLqzYzGtLxr5zq6|PUayNHk6qX7&ce`&P$cQjZ4 zUz~_yMD>MwrYVCppP5rKSgtDL7xxm7z^%*eOc(LtVNx~qPtNH8pP8%nA^GCkpqG!T zk{+Vd2nBqSL_*Yo7pIh?4*1sga0CV=XZg_*g&(0JmqiI+BLaMDS8flp5UeW0&9t5; zzT(!3m4*+2NjG_IErp_UzBMVf*B3u93(Y?Mu=kbD*#>eDMaD~hNjR7W#6T}1rgbb9 z5W0m(Gju|E4ya~f5(C0P$y~V}Vg&Iu2lOC07zOKasID}O{O&TwcgeE+p-F`cnWkMlKtJF_t{0K(ce zOG{=j%anWLITq)-YevB@zpAX#(2GY2x6V>AUB-roDZcl7lE>YfVBK@;T*_1?gF%%Z z+ACHC5QmfDhqTjb=mUA5437?mddXlL^rVpMl@VQiX~!6lx4l;0s@)mstGIq04M#OrA_M+TR|+95h8@)O(J!b2{zN&;ot1y*60Xi0!5 zurQKU0UN3QC}^l?yz6YdU#^-xS0*aFf?TM^Uzz^dHA$HBTyc%1nW63Wl9E_u-}93F ztihDg5=kzURb?)Nvct&cYNIR8sL=Opnk>)BGnaczj;l7)`Z>4LB{)KGl^w$BmwYnsn?S#3RZGSmu2zy?jyc%AGUV;y;S}r+Wc>Lw$Tee$F3%AS`F>O#xGA(6cxb%c_j}&j zqNd0$)B86se%7+>?`RmE?@sbC%f!=J7tiA^(+My@8Avgx)B8;hw{F5+M1#IJ;?;jn zTk4gBf_%3}aR3xjZbG3*f?)!zZ0l>~@k`x~OqA?tQ0jRrpXQkL;$seSOR2AO&qo2q zxv=(H8#;#dVns$HuUJ>HT2*|o%Bx2UOE}O+VRU)_0-&8s>#M|0l5+_e&Vts2hjgG| zID<78F+$ug-x6^lt?=ivW<;!Ul91;ABEClS=sr+5(rdCb@d2fOQ0nP5Y1e@qF?UMB zZ0HNa8A;$pp|m#f_=LXUMjG=Lb_1s5+cRnu}&5Cm^`+`Ju->Y9}LBt~FHj4(h7xuxQ)My-@FfG#P`tJgC1#GndoT?!vS3a8-1t*Bf9P(s`LF1e>N@sbECU5D(-zhy-b!Ngk@^ogt(0UIpCSEV->kyR8O(zv!^`$twsBQ<0cr}zpvI62zI{>sqOI9G4%*l%JO z9(Vo`7jS|ycrB}VBC#+0AsJoHeF3`=|7=JtBw7oa=veP<#3NY_WLf-&+^|1BH9*T4*Ter=@Wp zGIQb9jH*PTz@QR=Nc+MM)ABq(I!J2kX29kegxpeZjSr#V+N3=u>Z#)Ak)j$G3lTJwOM9-XKItQkvk^T(yMuCgPg^^lY}PAW$hXslb3=ZOfNca)k-MylM3<%j3kpBJu)n7rPykVmTzHbo z1raiZ3^Ejmx`sk-O@-8uGLlDld?2)t?8r91rpvH?Ktx;MiM{2}aVaj~1Yg|n9VBz1 zB}9c}v`2=p1*vlZ5|CQJ!;`IOHORqAU7>-&mmw(;E%N;&wd*hf^ne%vn`($x$H+$p zj*+Zf&s%P^<_4_1m0|tAI;d6UbuC&l6zF@S-Ukp#@@NSL3NmO9hs+%5H-$7>q69$l z%xw5M2Q$qc7*rWj` zYbY%u7T;_dtAjKQO;_i?iEREI_JlxsIV$VKh-Af{yboN*d-lV{8)$7Bm;-1?ZAHN- zRzV)3mc?XnH5h)8hbVsq)y1}h0ovBG2QWi8T4cIvR{^%(^nUo}Gecb;!1>ji?%4d& zAl>@S%ZAPRdj@_GK9u-PQ^nC{<8O)G;fLx>DK`vVH2J}tbSjjGDZb!R6JAQR7*b?W z-6TBxeY}!=2cmF%3E1HvI!OyabSSe6unK8d)E*yuKyAYURH-aLZ+I1Bx05A16`)VP zLqmj}{9yFMkJzt0$bIKv%8_O6c2l?|=s>EST_1Def&#+>r(?E15+XO%vC2x*UULhn 
zdq%T0nzly^`T?r3-a^g19%5yVCwo^DHk-gZDQW;lu9}yiD$ajpYV=Tz`7ZiFa48mt zf12zJv-mrD>p$)vomrh_VRB@jMkg%?0P}gwP9FCv9%3lnALa*YF?YB(FVaO!X|TnF zjMrpw15iO6f=?u&d3mxt97vl$n9eJ9ot`$R9#kg>e!Xj_rdqgnF9*;c_St~}94eU> z`}fnM2A~Tb_P%kmp}V7lYY`xb1a}MWo!+izVvr@@?wo$wP6bi>s^IU(<7JET%HNe0+zi{0KkcK{Z*Lf5frzXvKa zkbVcl(odqr4<7SFEZK(le!foVxh8*$mr=}f5*$nqV9d6M(52;;-)YW+#A&fsF?p=N z%6F2lPTow*;TGcdjDc!V6X4N9KDamUBmuS_b+N(m!#+pfUCOGp;t~to=Ca?JDo&WI zIJ$&HzEQ6n(z6~9Rv(KRlV2Bp#oZQ39Yv?EJuL+rNGWZg_XC?7*!=o1|FMwoDIm91 z4q>CR1JP{EOnfQExWq`;`RUirI1fhccW32iKl4-Ed^$VSN{%Ly z&VEJZMT>O#{Pvzv_Wj}Xg}`FkbxgGnf+a(o&iam-3!n^c57@##Egs5I+MD?vRAn=* zG;n>n)kvSq#ill6r9KvM6W3;6=8oO-g@!LzMLAN9*5wcVwt{HJvzLLy3wGtX0>ZL% zPk``f7%H*llW@`Ds6KOzglyHuGTpMU$hhjAkbgEU8WdK_CX9m!L}FGm^R(1z<_k0a z{9rX(UyREX^;$BA8ouRYnQeD3@{5Uyvg5cGYQ4_ zV#z_HC&Bw*M%{OZgCpoG#|}J^a6U+3*}7 zTAAzbw^mv&xPM%3>bM3!w#1?g38cDS!~!-tzW7!ECHOzFM#Ql4s`7i_EK`fwzF*{x z_|Yg-SA$H|dq!ygON|qmsjq~TFwo_YqV@%!0%)N|BSwGHg^C_p^FHJ zOrAO2u5!^p&KDR^6=z>yantjY^@?{(@qeBZ14pQn6L%FtU*~T*S>h)7BI)@Z5VQ3F zYO*m;ITk*~VYcAWoPt8YYlBiYm;D(IIvSBfPcmz1Ms9p9{{xYZ0J?*Q#UW7fvjQ}( z#Jd1cng+Nj8ONmizq`-CEhfxURJCQlgXze4d(~;44bStUN%~o;yvtULi|JZURQ@Ky zWH)Zb1JEj`sot3u@H9&?bmR4Muis44F;c|$->7*N5@_%B`?0o`D7`KIm8w;#C~*Sk9^T8vq(I-M?xTaQAa>`r4y0T%-S-7`SeBYSaw zg1@8`(qD3=8d9%INtRfP1BPL!t+-*w^vin&>|^iGg6u4Va_`cyoNg zxS-H7@NDYk>oX7^HJFsmK1t@yi9}lh-ynPjC>t18z}#9SRH00Gc9>nu`U9|KCMW|E zIrAfaFh$q!iTS5y!{QlYti?@RtvU-d3t-OxjF;tJjr2Td?brdrRVsc!2G2ZJT~k;~ zXJi^B*dt4#YDUAa=lOKFP!rq)kZ@iW%KeZCC!jjv9!wRnGA(Ls9`{)kZjkE*qb-FH zZ*qv(*(JNKS~;2Eb-Qogme?P|4J1cz;#z2atz8Q<|JL@~zt=mC0TD>2PTd5ID725Y z@61~dJK{Us7EgyxeF0c7{l_fcMO8ot?a>w@JiUVr+{>XMl^e@=X+%YkA=CUjd(zcQ;Up@lx43p<3 zAb7e1ao;#|El82CTaSDc-K=A%U;g_n3LluyH_w{@_cMh(AqO;_OvtNQl)9#@srSjIidm9XDO=Eme&3G< zI{QTKT8O`+NHtXgsC?!p3Jevzc|_1A^B-%m(81 zfCwd9%pIzFSHL%Hm%)U^9SGIr3EXfmn0GOSX3Z9xBR{@LvngKm0g6Oz;$z7G8k;NU z9O)+Q%lmgJ-?5mM%}??%z(!hgz6%sHSTx41_&yzBB4&)a6LHB;(5XH#m<8Rus1ET z-|(+2nZEuXs@DfX898(9cF=VcJt}NW@&f;4ZIgQ-5xdj1b6FzzU&>N8@qoj1YxwQt 
zI4C|Gh@Zno?h&|N2oxJ~U&md?-h*zO;!X_%eX(=L5fFG$ZRBYb8x8WG7I|z0ImRl` zE@dfkh@ucI-c7_m{bl3D_vWQ9;-~1LOC^XI)1WEn`U8j?4vZ_+SAG2rqRMj;_UONV zkF)>s5{hj}_H#;t3li=zPa)QFiva^X-_}If-S40a2Jwfh)IJzpdTEhXWSeer`10Hy zIZ&Hq<^}y_n(EfrYH5}b?=A^CIBr;v=#|gPn;uo|57s6b*$1RxOrBV4AK1W7N38F3 z-O?pA6xJ4tt0~di5OZ7?{iM_w^&2z&kY0Z8icFT67Zjp^xnR~r1uJR zWdRh;5bxy>T{@vadF=v*i*u0I)ngG?`)&(jM& zA6U5g;uy$~tIh6SBp7V6Ai>aKwFVdPz(FTq70-hH;wmJ`99m?8LDs_+lTQ@8ogIm? zWX9?NFegwStf2d_s36M*LeKTp#L{Kd%8qD%8ciMWIs;sSj!?)xp53Y07CPDYSnZIj z<-~cnz8}tYT;?}fG7<6eF8z^LH83di4o)f1z}OMQ(~UBI5f+3=zNh z{S>(~y)u!{T;8VJbZETgpsMB_(S_Mj?@&)4hX3H6l=cBZ%R0s`(0E4nB0#pC-dQ6B zh838enMIX~D)lLn1=Kc^a0ClAQ`-HAxHU+I@42%|fHczYqc|Cw<&$M=8Gfg19)d`B zVhKxTl8`<(Iewx)rL`j#5z-P_MU%G1Ej-T*#fwZ~8&a>6Pv zst3$w*;aqek32fRTL7N<9bk6CG>yl;{yjO`j`6ee-OHnK7zhd640l%=1M!tj^Fd9;OK$j2WjVR~;Le>i z;rUR8c*b=EU<6-eZh1z1Uf@p%P+(a#zB3Lc@pOS9Dz0iC7rQR(0P?|QfG$_0HUzOl zJgGDQ;@sE;l;vN24jX3#WA2@Ku#8`9oIQT!W}zSpb$}4qxxiYmb7x@ZQq+s0sKpzw zn@7bV{qszu1`cn4EMf26>;3%469139-D5(uf(+;I!DRXJZ@7ML=A4FOU49TV)%<;S ze23Zzi}J(86ENBNb29l5Wb;Cx%{xWp*RTZ`z$gjM>G3}O0Tl8Ho}d+<;BCGAoI1m? z`8a+BLhZM93?Aksq1HS|$-sZ=L&i50pAEXu^r@DI$J`)~Rm{-S<^2!q{XfFc!y`bi zmJViD3#t&76)RZw0M4y-+Vm}ACg(TsYo3*>#+3W5&erQ7rf-2_`a}vKKGj%YODe6q z3YtOCbOru%I>qGpXdhJEILV_g7l*XBVokkI-b#5T-N%Rn5Ycoz^&H|v5F{#lV{Dq_QaqbIhJ}4Av?)j_iKY2 z+Exfs+P#$`20ZAW=(ktkZwhdK{|}OoQ;! z%3Nc2rK-QuqffLuM0%c=Ok<=KvIxESK*>^dD zXih1RCWdTZZD0?~X^6(f&BlXns}iP?(`98K@uFvzFHQS4fT#{2GJ2rEtF??Xux4Zp zSj+SiUyx45&&X4In(h2ih9MqQ3T{gh>p8zZFAyoWX@t#A76;vsK$s}H(4^lj3@L3i zxAUX31((-wqp1#)q{Ds?B+GhUJQxn#Iw-Nbn*s5Fx?3oCm>If8kbpv!8rh$9!&r4H z%I4N0CUmB|m&uCKMN{|z9nrWXHgu_$1%Su%oaZQKfCZdqV%g)@_`6J7s3>zD zMgnmduh(8d=3TB9S;+^rYdK2+&5wzv+nKKx)2%y@Pr*7S<7dYsBuVyLm(Nf8$2Iuv z7;{Q#>3>1SY7~KEwhNL=Kvr4`sZd1#?H(Vo0Uwa;PDvby1_~-92Ryit>_^xbLOf^? 
zh-L!h=)F*-&X~gNt_J|_rviH^?9mcUhCJsu*JOeBnpm7JDIX+prH?xr&(h}?X*y{g3apb|?yJ+D`(9;)70E`0yqXBJ@ zb{!WxS6vZc0MMpmtp-v;uq)Lk&Of6Y%;6@uW|mzU(7cD^BJ*k+QqmCYWIJSetO_cR zSu90=mo)YOQNtYh9H2Eop4%tTWCZ=xgW1q){21U4$n(l_hjZ^YEk@A9(PVo%6kNLC zRAy4x(GdQi$RTYYWv%N?_-z?jF>BJ=8yt91xm|H8-(LRthvzqAHdNA^pa0_nCXgZg z%G%+j3^Y%V&7<-HPg013G!0`I+?~N6iI9z>7iPhDH4yi;QSeKC|5E^v5sy zD;IyO@e~c*DQO_dNO-aH|LwOfd+Y!HSsr5Wv(drS{TJc!|H;B^YB}8+PIFh)&j)u@a=AA&Q9!&rCF7s5O!u*oj*#=@75AznHE2yy>mA*P z4Frf`KvKs;D1Lb?2q<|tysh^tK++rpGHeXE#Q)UM{E;$%t~*DJVFR#B5Wa?UHon6E zbPo;cY&OVs$Mk&aiDOEiL0F-GwPOE;44HtsmHHLLC`ct@gWgO_g~2*dQU&N&stXPv zszH7fhpg8hDD3v`65YIxY#s*9-~X@;fEnB4+MH%)hJs#B)-p6778fJqXtW=%g4xxb_uSTp0})+IU(Tm^;%W>mz6;-fKv2Y{N0^k{*_*Z`P8 zAGj*R7wbwxtHX$EX87Oq@c*O14;|uTl236e=l;lw3~G{=e^1cIz9~a4iFYZAg?vbr z)_Hl`I!KZmsb1boVcz?|i~xwo{RdH{5gH{c{2dKh&XkXe2UDctDl9)h-F~6MM|s>q z^ss&paMK?IF*&P~{ zLuQU%?32CxE7E5i5gin|?_Zz@_saOY%lsoQ11bMt(awzrwb#)lUMUR14m_-uMGxo# zH|;)>yUzGe;Q|Huepr>CnXrcgk>~-vk4$m(;27(jMke_y%WICDnW%MFAgs{8TCx8^ zhEze_%332P6r`$aF@ipKNvhlsMnEd?`@BDig)U=7h6}$Rx17F0$HR);Qmn5L9t*%9 z%)cws>*#2(Ar4ESHlz9%{NwDNwgloO7ZT~dim$}e ztupV-GECO!Y&OT6h|j-&tTY7C|9U6h?C}B6Aqcbf!u|BM51EEy)72 zqeoD{Ges~49 zziwvUC_qK+C;BKj`?B1hjur(C`HLCj`>m2X-S>(QW3Qm$NPX;G|J>U^g2okDEsJty zf}3YQZZwGALr+h+!U8jgL=AZ7Oc?bG?=$F4C@K57ghjM-a z8a(9QSU7c_hT#2! 
zur-q8u|LAZhOY7Qpu?%qJE(;fOzpKUxd5&pCK;&WXe~a%h+pT4nJ}Mx{qO#G^Yn`J zCybwWQ*q9>H&zeY`e7S!Jv=noI?UNC|2%{dGx8ytdal<7NU|ainyOfJJS7#JB@m;* z$5GAEA$lg%LCtDCz7Z)STPYrK>OH%kVZ>E4^X7slOh$_&yC2Tw(L>jMj|}_D!U)M- z?u!i$rZx64e!&`UNE@37Hh|Im{VUCj#bX7KNA5o4fOjKsc4GtMi* zsfLc0Mdxq(k>3zi1}W{}zbe>u!rD)(91`Bp;$aX6BF~xy!{DZM7|O|4;__gw$Y!0K!v)tz&o=Z310pBDB@@%)Dvki76qCi2rfip|-790QTg~oxqsg*wV zz6%EeeVteh6YVOo({6)Pl>^qb9JUxD{}zwe3?{~y2ocO!acA7U8hH_VsUH6pMg@7# zR+XcO|L%`GkPX2Zddfp`ZbN8mpzSUi@v6Ro5$Z(n5$~U^{-AzV7Fo0K!Yw*Ui1If#5v~&7;|r1We*x_gH6Otne@PYWS&C2`iwukl3oJ>i zbYW+^mgjkhLPak`L-g*=*}JGm%Crw|Si7!|M!ivaN&I#fpgr;x6PPvIBFWJYo_D{6 zb}8R*)mHKIdzZkH3Za>_OfQ(v_Wd*octb@ctu4Dw`D^E;u9utl!A6CNd{gra5&2nb zKUHWuR$l$&ONhUTu=8RL_>E?7njreY(-0D0diD_L{W+$0$VB0ruYp;!JdqupH=A+F zHDz32w_5XBAqr!DE?F%&{}3a5zu zZ^6GRKQsil6bgK3^4*%1Y5*NZ0S3$dEY*=>t%xpTQD3W-$9eDC{s6I(lOD%Jx?EI3 z$TNu>6RUs-hWMi((Zla|$^Ws~_b)8A3BTqo3^5s`rw~Uc3b`wXayB2c(|_;p>&Y71 zZ)t+IdsLpg%OkIFyCN&iyN5D_pRwAGmh!Rx8d7U=i`F8Lft3h^X3Vd_N>06g*DYL~ ztg?)GH|f8jRib_E&nqyAk**oW)z5kFNt|)MQqoD}JZm6}TEG4ucf2yg-10uT1~J#QryXH8PWE% zPl7*yHx5%;C7LYay7t<&ert`%f5`<<=*sf^Is!s;2htdpsK&_Jl~^@|FK6+ zUVeX1-5WslvzqO9{R(yQ!-D8{Uhs6-!E#rU@{G35z!)2W_qxMT=W{$Vfx(pPPn;e} zH5Zt5#0Ge*wS$AuEYU7=`84}RHNEgr(E~4;j2#kPx|93yZ(Cfxz{c5tz*Pf*`{PS* z>(t;__XFJS#Zfx`0SJrB+VJgO{AkQO9$UuN@tm-4?3&uM+_?DZB@Rd3V6MgOd#>{e4Y}qq$sj+-w9ZqdFBvWnrN) zRHAQmD9SVjqG=|DweJ|{@fNF5OHdkrk?GbEOsXNKEyBKe#md$C5v_S*4EVTl1{%~XoL6P z2V%oF80HyuU}e^W7#l~i6eE@Zf-qe7)U*Z>IyN|VEe}_Bfb-r&-BSi}aO>t|Jzxft zHEoN;c^mZgrfRy-$d0bUc)l`OQ{+RSzL5DkNOm~J(GN~Eu!8udN6kK-dX;84(A}~KxTkNcs6^j#~s*y?m>BUd%UD0`BDxAH=@0ag>{BiXPdL`s4#} z$u}df?_7p`(U5<-n-=&mNYVh>Lp-;P>;LSb^o9NVyqtRz+ant3lD)4o;#aByWtQpC z^ngH%oO!_R2{{H9K-M33d@ew9faH{mEAFDUp@#UW~!b(`GhVX zmvBh#sZ!m{9CHTcZMVTpKe2?i(FzSm_aa&%a)d(34;!D3 zrD#V%B-NUBw-4()000p*2)5HrN$;%J4#_26LFK-Nbo1Q2!JK`-%^%&3rn#3T;`Xc1 z+$oqU3o()k{;UQr+f7KO;vi1lzWE>f7xrtkTNCEdbx*A=Gfc z`Dwhs)aF3<{0_hEdC6~k%wcC4(5M1g+H~a!$CEjnKSUAFvwe0Hd&jMU1Kmyr`rdt7 
zrGR`dCc|@|ODo9$S;^=(v%|DRWWY+!2U4+Ixo|Q>p|Qwai+=0LiiwuRFuUwJXEB|H zjuT`@0(On4)?qpgc-~j$KKmm&E?)rstQ(u{PJa5fdC`Mb$B}dO<(Ib0sesF7EUuL# zbvQH;N*vMJPgX0Yqoo(5XY!h2zT@6;u?Iak7Omn(is{ipaKzr4HTvk_7^=dnuD4U! znwfj^Qr{1`KWqioj#OEyuMOk|8D;)Ynv75e)WjjcE_(aY$GGJ=noV1DkCf;<1TMSA z(7Zc2A9#`h(EX+OuR95#2`+Oq>JPmETZhj*)XG zQ?>JwlD@PQYrD{Q>=YuUQJsWrhuNptgf)hC3=FG0NjJ6Ws_^M6YubRgk7iW5H z+_J64yflKcZfmC?ya+^OctyuEDGv=7JuI>sd}UKRBZ8PDlpm&aL-4O9groOo2+M$& zF$>iH0@_#%cHfU$b`*rd>K_XjoGj?v z3i5pcp$N(PL9TM^3M$k-*MhflEjiUSOO_^U%lzjuFIdM8L97#I!Dl_q-4_R}#in9~ z>Q>X;tvi&Z6ZC5-*!cv_i^evbDgNUmI3MdQ!6JYCtj`#KPoM2Gkm7U>;wBLxEQ;J4 zf2m!%l=W|Zh4mLaz~aPcf7xvR7d)^hd;8f6g#M%=90lFX&}#!CbAK88XDtHmr=M90 zf=a0GIDVhWIh2M%byx1L(R?=^kpae<=?wK)I%VntrEHY47bhTo9m>?PgTv2<0J=Y# z#ZaQLq0VPZD;w*k?hORHJL70=04Fszmu7XOK$U)T4hn??fEA+m`|BhAmlayFyE@%b zDKijfthX2~V7Hm&H^j1<3M+|Mqh(!SZuz0nH=G)(iC&5E4>P)^ z#wHGYo3a3ZKkd$N(z6}pa+6$LzD}Y(VZEyWuypG55y@_`jBg}B|M6%woic-VGhm9N z>D8GU-FBgFCz~5s>D(wkK?9qDJ#|tGP?HJV9kCdwyX^HcnlxmiPjdo)D z^dIAf8t$yQ-MJ_vcl~H{I2XfsASJm!nNL#GA3Ym%WjZHxzdSaVP8Aer(Yr2v*7iv! 
zJ?)85rTx1X-+l+kEmGv9e;wzdRP%XrSSB4e2XdR=y<+cg`kk--P5LOF{mJPm-#rcT z{;>8p`dfSXooou^^n$)kua$)?Eu>U125lb@96)H;E z2C+_pLUb-F#%YxgUm2)tIkXAj+FYOdSm{^!7?rp^e?E?vZreUJbZ zL6i0|!>@xXeaG*5J`rVK_Hv*Mn&o0wn8H;7LZ9BIlhNp!f~M20{#8~NENIu_tq}@$ zzy8L^4?e#8kA@ytli-o(8owZ5OUK{qP3EgPJ&ew5@&w|DCKR8-U-{$xWfJ!PF4nSI z<0g*=?GGPeIic(Q`#P>GF`L#_PduM|cvcXiaZE#8?R5un10@_AK-8L$3_NGn#nGDO zSwh|;0wnQ8nsV#kZj+GS>!C@u=I<4v(!c;GF4Lx$0dzNi04s=?ELO~vP+aN=?F-WE zW;?3V#{;=EY?-Q|9^j)BY<~Rk8L9)$c&JdbqLTI_18YwNr9bCN=`?8<_AU0q?1|{I ze@|qm3k^i>!t5su*SDflm&C2FR)4v#{}x((Z89|;3k zbMM(kepO8idAb1}sl=X!PN&aR3x0wKrBcTq{mBqTYkW*ar0J?< zOPD_tfzpss2>?K{@9U@B;mj z2m1sv-?SOusneQuV(gF*KMFd4p^7h*ejxpcAH2D-L6W*;h9ZXsZwsI%GNuVzrGFqF zZ{&x+p{8BrM1KRa`M&__4$tQoKR7{dfRRq*Xu9y-&jDZyS5CUxOR?LqDvfH86vOPa z*GodfPk;R4{r}<@SJtE5&vr+1NUdiZYEL6|i{2E^0X4YWvmS6|b5|wFrPfe+P33TE zM3ScG+kq93Cu9Z64ue$n_&v>ySPH`bo7q}9oU(ytV^c|+CBovDzGOc2T~?jR1CC)7 zaq2&&t8713AKw#vTa`*PMXMU#Q$Mhxt1}*s(=6_%?+Vn45~vJ+X8<1w&Hs~!@EkP$ z9R^LzvyDee^in&*N%y8)ZDm70u{dt3xB;hkzxy%U?>&|IWZjY!i>CI{f~s7ZSsty6 zS&`=;SUaU)AWq}4M%qg_M!u1e@pVhe+VyH+-M_JwH~WaEvYu>?22a-5FmrumNRF2G#N+{$Orb?E_P$+|K=E7y5EN7^kocz z&59}ch&>1VIo1_M?#6KLFc6I76t(KHKYDnQ`ma&{@gFqmx5%Ji%k~m<6T`q^td2tx@XZD(R0u=!$QvB=q(`c%5%PoTKGPauKLE zapd4-VKG(BD4W8s*+cX~|6bL*UrIAYC7C6TxYg>@`%%!_%i!);2zbCWB z?UITFfyDrlSH!bz=9zth`u{1FH)^!|1+pF@76aevF39l@%J3gLIWqN4CTeuo?IF?* zt{{uwovGgd)g&X2EXE26(Fj@F2DtGt)rz&_<&t^DVR5TYz0)8lUC58|hMuQO2cXr9 z2wP{tykjAuJ}e>=W|RJL|DThI13Q`T(B&}TS!lq?T>W7y^3TbHL;VZJ`cY@lBD)0r z%{WmgLTr73AtPM^(9ww$9e^vc!I}($x+w_|6j7#Fhh~1TUCiTWtqUqlx1Oo1bl6a` zIs&)3rUT5RN5=>jb8J2cz=JL9yOTg-PtVhB%^l;LGQ&=G(XG+1lE8gFU+IYr4E)!oZEFaKPyud5 zY&`sc|G5&le&8zj&mwdEM;93&Ns=Ur;&O0XP%f)rN^bxz(!R_BxErHZnSq~LfL`w$ zsHD4GaN4Z0FSP!kAq+$6w%47oJK34yN<+S;x)+$sMuCU&sSY~v8=CX}4?=UjEU;AF z?QB<;f2K6yTzrGRq(MZp+(@ya>O_Hg)M&wM({WLw_`WCOt4rq04IHkkgxrfIDIXB& zpRutk=e%wkOn-nzWVGpJ@fPgF)_BL<>tztVP9BG<6&ehdg`iM}+XKW>2j)dz;)g%NXD75Wd5+d@DT8<*Y@_%DNyLG~w#%`|(C4hJWX`3KXGZ~llChW!DG 
z(AlNP9QIT|`s!>icl%d`G6*08+O?dbU~V(G34PYy>Dz-64n~W&m4Xhp*RH${>zK*h zwjxK3@Q5e~p08{w;XJLLE%sKwff#i1@&V(83bsj-&Y!6MA&BbxF9Qqv0s&~jQ&qWS zIrQKUjlk%|<9e9Id3p}y{ej{>Ff-w>F(hupe@#X4`jzUB-$0@WI}EIiHY)uGkj21) z;n_w+ui?DoV38G0FZaWLBdh4YkyUhG`&%+h=f4!3^ZOUgwjf39jBu+KIj%CT@33wC zIUYtSyNR;E{A#8#j0^pCk$Whs81puAWM<1{X`cD_FMqQ`O3qwFf54&7c`)=%%nE7y zfwvi8hXTHkg#9&>aF_vza9o|wt;e3Aa&jXffBQ?i&ggsdF3H@x#n!tm-j;MV^i-j2eCc1u&%hyS~*k72QF$IZk8ZzGro_~rh1=O6FT_LuU+ zN#re*H~vd)cYA&*wDN~OvmAI(jq3U2$1_b3+ILM>>{g=QE~1Yc72(}R77A?1)dLdl zz3iA4VCZ1K?17D2*GgFR4`2)<^Q()x-@ib9Mh1|QhZE*62>(u6Dc}#>B$v)4q8t_S zmwnXaO5^dlcTMFuea|zrN$v@}Sw0^8H!h8Izw&*8^AC7n3(!|8;NQZNmWnjPM$=07 zFaHM^jlVs8TfcN$VrwN8`J%l5W(4zRwnx~^? zmf+M~F%X6~RsXLX`T{fE9{iVk&3X(A(ih)<^V%yq@b~^4&jwifd72Sg`Ve9I8DLpu z>+*>IeY8w}HWl!k+)oh}#sBa#OYNv@j^V(;3RLTUCG^~>dTKlH^tkG`7~@=q^|Zhp zuqOXUsqv3eBREx|)SwpSpd5AWU%2wG9_1hO^8fSgd+&R`K1?sgWBvuB{(CRx@!(_M z%{E*JnZK^JTUmbo<-hme{}Fb8|Mlu+vpj7a!j3S`zt;V~`vMmp6Xp4fQj~uX@Bi}Y z|2to+OX8kLi_w;1<-hFBA5F^t_P_sU{QsY?|9oUPm(8_2XMxDCLpwZ~f2<_P;?F^? 
z#z_U>f%Nn`QINAxfZ5I;8aAW(>q(wbdjnW@{nJ`cth}DH!OB;z&W+LK+EpUYC-h{W zyAhb8OY*o#$CJX~>21k=-UaO)SD181i0gGqA7cHuDcK|RknMlhHU6?visrNZS@`9p z!%K8yg;U?ST zDhwfAA5-DSUyj_{1^a{4^OR#-Yi5|1%1`$L+P?>(1sTS`JvtO_egck7T`OLd7R|l% zG)I#_Hw0gwz)AbDolgosc`H-|;YC_hKs~3Gx*s9%ENihAlFfQbMV&jq!yAO_6+9_| z@uB%+S7v|oJTJdvfmv@iAZh0&|&R@*`6p=rd8~j$*U|7*oo(%Z>B* zvo$F0MMP6+(b~3XAmj2GM*%%)4 zRqdn4qwXm{v^3z1_2_TN>K#oD7FOC>h=>#k?4b@gUO98=dm_Th46ouJrt2#?eKpSn zdW3eLUNZ329>G8Om4l%2_JOZYPQd4Uz&X;sC?)y=!Wsq&Tm|GjfSjelKlu80HuF?q zP}h7eeHnfjAB^Of^hZquj4_!K7)(MMW#wfvA$Y~xid8iM+q#~g0+sHAtvjTAy5kF) zQ?#lSz=5s1X?10#3fsDqYw_D0&3ciI{>fIP*X^1qUAl?CH<7zdrstnwAIgADQ z+ZK5wAl-D<59#}TU^*T{;8W0VBR?eLOGYg)+xn({$W{BTyxV>m_vnv0iwJ=%QaThO zLH`j41pY~Ar+Js$TzWnzHm-rhX5WLL<>lYmOPPc5 zavaVw4{XW6>L8kxNO8O=2#Fzyk$D;sv z7XJR%w}89+P9Xy4_W`zzp6mY`dFaBb8#{V|7vJue(Myt8Mk=hS{%Ku@q+?_X2>a-aBh+M<`V z62D!Q55t$3iTEZT~ zo@__+k_#`;Vj>H5!a7y(Kc$>(g{oeHjf5A}iRMqf!bzls!hyFs1fD5@){)rGon~fT94aeYKlmX&TgoWpzg9s=uhh$`k2V2}ds<*Pa z1rwS)nxAj^*hV59%TzRD|4yAG3%-f;ur|)C#RoB8qs$1xPhX;-0hu> zZs-jR5g?%tt}NCnmxaI$Wt1T zIlt5bC*PNu$H-rN#r=tFW zC4N2aHjk+e^+?O;O#4%FDQUdYh8MYJEZIzRu6fD_tKwNDJsPxLjQaWz^rz(Ng{9^k zL6b7K5$%;%x5^8+Np#klZg!4K7CnT9guL!#c!HO+=~*?Qdc4Le8OVrtw@1vExAQd) zr^n0=R=Xq)Ik+{fX|eNl;ay0Nk$ zc;D};NvbKo<4!a!S@P2FE&+xt6aM1x6Wz|Nbqx@Cbf9_M@jIR}Ji%Kk(({78bgj^# zPjTc~59-;OLEWA(8oQ4KL?e0wcf~u;nNpTEC|zjYs>x@$4u%mY5kV`G3tqIoN>3a1 zH1v;|u@$rr>YU^C(B%;Cd0F&v{K6&=M<{i$yoA88|6P2mkiXFrxl7cmgF#ZSti3k- zg97>?OD{fM=abPx6)fgE{DD=&jHkDJ(RcX0uCi)%0LI45+Sag1(R zWn)#k5}c|@joS;YwNoNTn!S*4(>3H7)qD34@#whUMX9e-Fa8r(+iv< zb5KfoSsjB1xwry*UdbCC@fy9tGD^=F>s=CF82kJAmzyK`QKylNKB9}dMnGUQDN_e} z__xn|5#yK#W%Ic(*m%`>Z#G%ieTQR!3-M<6g2~654D)4kfzcTNjiSwQv&Cr+=DmnLvutNI;bDf8qbe5BLu zUs0fikPh@YYcN3tVAQy(fbs{BTAMW(#IPXYvlQ?i#}FHiCmf#yr>kYzj-%d+Qe0xC z>iIN4S8z6kOwp4C6Hg=Z-kD<^l&#ND$tYdXSV%nUpK35=K1metcM8O)4%tX&tx>G7 z7|lCILkz8lufDs!vPmerr6gkYf=jM{Pp=r)b~2><7F zDDnONSVkEWi-Y}Em3k%d?q`D5D6ywc_#_%ld$VNg&k%6?sKlalEXOl8oo9t(we4Ot zLkuM-P!Mr`A*owU$uY|(8EtMfUF+KV${1)TQ 
z(J1;RpC#uL?C%JM1q_1gmUk|)&o!GTCFk=gaQUm3Fg@ALS&}D~yfAv=IB-^=MYJ)N z=VZoxQGEd%1v_`Q7qLzyGZMtqOc_U}{THLQR^HkwrLPqWCcY76qf$byj5_=)tri|Y zmqe|W>0BuVE;rePgIzRfe?Ys65-XB88=;@jy&8qm&}!84U&>coz5ZpdM^I{Wbt!MI zboLhRBEp1y_g6wUzk=YWfx@8Oo!=3tdKc3Ddj=9!T9H!W&Il^bo%%&e7+HLfP z*xd=><&7SNw)=N`LktS{zmpQI_tng1h6~+5 zOfc$}bD0kc_Y^5t@HS3oVpPIB#h9+ozVs>8 z&&+;1(wLp@hmCf-Sm_5Qg9bjR`?l)UE94whB3;WXS zOAm58_AHcD-hPS_5vIAKCQ_3NJu5D4o|A(V^=e^Eehrcx5&Y$6WpD!WsLO4A>7eJC z;*sRju=S7pTpi0pPw_v@j8`R|o?W4d_*UhlR@9;4zd=vpORe6X!PzHq|*s4%7r0yTC5ITNoF)Bn^9Itz_}hHk^a02bW1KYO)gj(!h_B>k+<4eb-ktLJYJClXUH2|EG_B#1fLwWu0z) zJRy)}o%^;-Kmpb=eaq@r@M6XL7|QTxM-Sx9ZsIkwO@DYM0(m=Qf>s#Yq9 ztw59bQR!lj7!C?`9R?Gg*HI++@X31o)gEfy+-4Z%VrE^pPThvT9}dbb zW$_v(4)vhY0oR*v$NfD!m3f&9-CZ~NWVqKJ{k)T8Vm|5Es1Ga~Z?TQ*$<^}=-kz07 z?ca>&47T*~iGJzRX&@80ojf9^eAK_k>mBxPhPeExqiNF{6y+V^7CigHJIj}hCUO5NV{SzzuGZ{}7?tpj66GTVlUP~q5_JZW9%-J>Ecf?4@b zXIsK6y9_uA{=M98+uV*jN4z^iE*d8BOeI}z9J0i0A;tCmzs|CS?vB|Se>Qz(QV3C@ zov4;!o;o^_I;BjS6uW8Cx$62Atb@dVrZ!F-bqJmmAO=;pa~9_Od5>?+7# z3dQ7uI;c;QHhF_vEE>kbPidhtwRE_Zyw>mSu8XS^$5r2cQidJ=j_35&QOZl#8h1sl z)rr|=kK;yLT?KNXk~Sp=8*dHhaF@3VFD4Eud42AoFgzwXF<>jue!hRvvNLhn$2H<| z9#DD~i`4d-gEEl=?Xo5*(a^3s>h9Cp)qMHDFuWr-Bd*b)aj@-Ds~znf)PTK8=ZzKT z{c0ImYt~YGFWX5b8V!~j(@#)gZS}1D!nhdo4*DnXK|m&v_91ex{E~7K$w>MO<=0%u zgbGL6*T{DKB3=(U;6$L4gX-O#nN69*A9JVf_R3 z#7Sl%#7+5k)~_#)ZaTCW_(@oe0`x8vhKbptQoSRP5%oNsc^m7U zk|89LNO>w52*wSID3fH6yw#E=;DAY9rgX8^qQlIRY!R z{uV~fvEpJCYK#8nhKr=tsf3)<%tWNYnNM-CTG>n+BAajr-MJu*Z-hd2k_XG~<8?(z z4pfwXS7sFYxuYsHb&fc6lWmi}ZBPf;F!Ue-38dra7alTh4btzTSju$JK3#fcdE7D9 zV9K+cWd_(aoJYyu^bfB$xmPh?gr4M01wO-l{eV=PI%Ba0lbI+}_J)WfD|heV-GbD! 
zlcOpH=Xo-rJ5(B6kENS-c7wqy$mlr!AlH$0Vkmkrh68@!k8}&7sq6LtN;2wth4*`x zJC%iZQ_r8)rN4VvM=kHfTfIe=JngPB=j_@+pSSxHgf1P^ZYiTN9GPo_r|pTytr{OR zUW#Eh@Qb>UUrtFvlemladLW|KZWd-T(q1vHV#9;s&e_oZyYyI$n#ceVVf=@umJ{A# zq=zT>YbqR8>S_JsOq}n=bRbv{*t8v~PLc$Mn9JwY$Z<(*L}@2V-BIhV9-3yf-qD*_ zJ|3y0e=<=mxmRrF=flSZ5vJ5S8QWTj0cMg(tIp>^1It(m+5%@U%uv4@DxiFVuZ;F} zWB1T`@Wh~Cp4n;pF!)8BV(;SVtZzh41mYPfzgXX-Q~FVcT>Q7|8|T>EN9rbNj-)o} z%0idL!%7r&3lnc|+Hem}a~fjScL5-%ydN2tsBl9}rkj_B z{M}DaiREpNxA=l)#nerErLLetPxlXc>>`V)`YZ3IK#R8KRH z|Bg*yK?Ve^;afTiTN1$EesK+!+F2x#Zb2P;FFze+yu~amcI0zXisBl|Sn6gi!$mnw z=HZ(+tSYFPk@jnETshz^qAP3oV0_UzQ_Hs437FRKAXk3I7wNP&JSmnYv^?dxI>u+|tdAP4>xkau8 z&Oa^ewb0NfFhAPy`T_8P2WNzS#&M}3+NE!9Rc7%MO4J_Ky%8& zK+-m&MS__Q^A9V6wMH#1jEhL*@i46cP!&cdlE>oE$Q>t^cfVd=^_3+LRd6Em^AQSy z)2znS`}y3z!%7t|YA^f{c7E`3g9WcWc8 zMAw8)LC?!`780PM0y!AN%j-@oJI&I;N^@l>tum-9L}t`1Z!1Bc1%S@IS3g7Eg9ze0 zfy+ks#s%Nb&Y`8;*z@hWtMnqclW|CXx;3IhW1Eoa0JkH%ja|AUKO_po)=L%HGQ&+o zYjlaQJNe$nWz3eKAZkWmf!hup4*~4gaLEd zfiQr4i!p_0ql&P?d;xzh7~b5NJAJ;`CS2)cwh5Yf-nn|fm}tqYRU78jJ|}l$+IQf7 zV1UnJH88lBr^TEq<*nEB$Sa*r;C)Zu5Uv3q-|G}<%b4(LwWcw?m-A;~0b+xqVp*Rz zHoDIg&T==;#VzN}9TLJwsDghsQb_YdvjVBBAodiDx5vsi3XpvEyjOPKz}kn1z37v@c8k+#L2Fm}qP4YbX1vG(l@1$N^yF>u}g5(oe> zYSL@f*Y-Cj5jOWPTNn&WT-2Kfm5u7M^@U}n-2K`E&JSId?4si!785>xkYDM(J{?=1 z_1=|ZzNn$FSlPfJMspWEQ*KfddLlTocX`N{ULMga(&BY{9!$-#`Bnbzt2i3TL{d3k zZ$8chbb(%L$T^`&C(nb;L-zIFZp3x)20DpIMeK2mwv`*>kC$E}Iv+o9% z2DfgFlkRW$g|kV&rrGqw*bAj(2cCGI%^~rP<$kp{iK&;3RY%6sp^eBSphW2>zTotH z;g~X;aTA4_+(-gx|Ouln-TZ?1p1#vNmGDqSti^QcHq z2Qio+T`mggr}5hyZ*lJ_#}T()%h-S9}SUCdG$4rysItd&|UfRn-$eug%cbq7tYjBpE$d2Qu+W)w@dz7)_cKI&A!IN4sI zDPHm{!$u)OOya+Na(i8&@*>yf(OE3!n!IkU^y|zmV#&C`LT;awke{7aE{z9lD!B%T zk$YkqX`Q6Y`^R~#jyPVqf_ha!(<5K3!@6pBQ9l{K{-+l}zjE%N(*e4ytlUGy3meT- z$N2N6{nMVE^)V5yZiXGv@3C$Es++h2mX~MQ#JH>0toouEjejiVH-S#XvqO}#^Z@}G z*H*LAa%9`*t=osVR%o+&KQFSH(Kr zndpzsiPxZ8NWIvOLQb)B+Rx`UbJz^B5;>O7Ybvn?P%%c_<@~(@)vr0@SGjREGn*h*tTin?-JrjK3UEWG!_%ZX9^?c>epb{<_x|!>P{Gp>zZI~ zI0^L78E_=F&k#vvi 
zLv>)Dcak(N?aQNjr<~*1?8fMER_5DIy{@D2Lsrk*4ux)NihEkGrqG{ebY7i|V=NFz z>^Z(Gfj$~?4!|?4{2*T9l$(f(|1;QwMli2uGOFYE9MmYU+s-E zsg*$!g;aE}YJYr()V$R4z9C_U4dadD9W%8CYFz%KCHavu3!FwedbkpC7!>X)igvUU_@ zfuWM~ROFfID$UKOkgRrkHA_{y=w?;a*-^v~NwvF6eIvDJTbFe8H8$d-q;4*<%dK08%aKU=4uh`2M6mD*ftCviQiQY6W& z6IZf}e(IstqoEUSGCjY1KGUnK8ixfcob*18hzUbaI_xf-_aV}Svkvc4|zIi5HjiE?WU zXGm|DFcqowQrI=;ZiM%ARoGLR-v>{ZSxh;(y&W;DeP!R)8C>Ik?xM*2*vL;PVEbnF zkx+V(7CX3J@QZDbw^pvU`VEyuI+-N(<}{Dh=gtTlPMcYVEoX-=AHR}&N+$?Q?cThYaU7K5vrwh|G4Ni_$#Xj27IDfTdq-|!KqzIO zrvz6>Wa<@vOjrzW!!z7vj8sq;m_3Ko%gs%WtIY`cId8sZ%!=dP6IcqjpZbG>9yxUskuk0 zI8YMB+%sTF_?aQO^n=qhLu|pgv|F5znVYbH=L&#$tDUk*NRk8%SS2_u3-;KG3$N{n zIfw-$25#M^AL}HY&uk2VQdtMi)>4)H-FMlwJzN=0gp*a3kSap2x z#vFc7yB$TL5v%CtRI*}$BYl4ISI>3E6`CeTXmS6S(Mt2VcXP7$6VFp98`r{e__=g# zv^X_Z+pJsHhgY^vW=a}3J_JRd3LG6jQ7qiY?QYzvPFJ2w{uyS%LWzpQqlt%Ntx*~@ z3t`-`k-=vxP(%bc%nxXdO5rMwd+6hz!Mk-gYN3HKcdJd%5oV?N@_6Nq$#<(>Q+t>L z^Ep-?5LUw>+Pd_YkQ;|&H?*`9;u^E5J9;f&>Tv7|99mMTb;R~=KDM>NxGr-jRmJ-I zzALuT8LSd7fSiqF4>W&PejrJKtFp7O@tG!!1h))r0c8>kg_p%ze^8^x)@IYhnwaA| zGkj&!(xP;IVAM&#lraiHMy-yAXTNw-X_t#!ZBZD`$Qt+Fo!R_{4|MV?wu20rPq^}w z`6mgXNEMoRCcxI$jdBf^|C{AfYdtV=?7KF*?->8Z>24|9#K@PGiI_e;jrSIf>ZwU0 zdO-O2MJ7l5tkoT>D($V1mHh!ts?yfOCJg2ftixrNhNN!>R41uBSIx_bfi{=o9I94! 
z2Cw^#0?ld10!JQ@L?IV{!OMDw&mMeai)_*@T-CMj|R7kFJJU&@g@iJ;^j zEz#3^V-YWWyCj6|Li7V5>+(3yyLq{>ktDU3e>?$IRY$?RZK^DyDYKt*zeJ0`39f>U z+e7^$S+8*F&U91DFA9-E`8+Qj7$yVn1SGX9ohsrjzzWWi=>wxV zJOaD0sP2Hjh!RzdLe|6PdhYHvcMeCh^5CG8^eS*J2_L2W#~QeyD9Q*Er1?4~o7- zVa;QwM^c7s+X>WLu!TCku6DTUz}4VYWuKl)u|G8o83GkvtQOPX$^;)snWU42_nGu1 zWRovo$Z|&(CoG?TJeqaeV(c5AxSJRl4`g0Q533_{TrJTa=y3FL+wS;%tGtiFVn(BEP|78GT3tB|upbGQA8+~gVUq4*wsVbwr z!kOwBe;7{lYmeK4_^K*IrOU8YUH8DRbInufb9d+dc~mF+H2y6ujb4+-q()??ZU-m# zTcL38T)AZF)S!)RjlzyVze3~g=)s&W6ZFlvkq{(3B!0PMR%4U{?wC$@ev{>`K)-%% zr0~%T?IdnHnsMf+Ct!}mqd&YztoA@p=SbB=Ce0U}OWJb$#m1|wj-Z-j52Js{Y5#Gx z9uxvAf-j205fR=swC18&?NfZT;;GQ1a(kued%AWN^4h~QkF)1mp$8;7*uf(_2ai=7 z)j~SUgpO`BsUcx_n1hj@VNyFXMIgys4^3IzP_(fg>>h~2r+BV|cW@;qONF9In?Aor z5#+oB*EB~ew0%Z|*B#SSBIlUtmfCcAEON42`$kOEB5y~Kt|QoZ>WYFg6epv`-}jtR zgZE}jv#!xVbnLVLXfgOBk-$|i*>>=%dc~yVW_gk%?n}wWx)$o`t zeyi;%g91ajhz4y%FBh(+_neRzsBi;9KTQU6Lbj=7$WX3+Q@zo{gt6ixO%}$1uRu0^ z(vtQheOsf(FgM_Q-krI5knMaWNI8FsZ*RzbYe-D`IwMcb?~nq%yI$)ApdarDihU^~aji}xt*vRWLN~;*V(7G`)zasoUN^Fo1y>m!YUXeJrPXBI z{q;MRm%3!uD}I@ug}(k(p)|bFM9^=t66!j)$NRcM_i45%SIS6yajl?#la+S-tR44F zsnCv&c$bB7zVy^2MEuLCyZ@lYIR2CBAW0ow1?s>*r@MIzjsP1xwQsK zi)lJo%}}`=yxN$77*<))i?l>ZhAW>DH4!veNt|TWxHY8*!+fKQ!fWrd>}RLisvJSQc{>(W%#>NZ};eU&Wv0_i7erJW@CMNxwK}wdYw_ z+%yC2q<7GxIts`ChIwal)Eb^Uyon6~GMm_~PAyHsvoZDrPQxb1kCTf-g*oddW}EKz zW;>tyv>*WXu=Ye6+&m$sWGYF0;e*EqxS|fU8`A+i_+-3ayrpf<}?`e$Tg+dg82Y(pQQw-7@9xYcrYM;0E7%O1s$8Xx8nc$$I?X;9*+Jsr}jgeC!(=t ziG!>!#vG-@b{gaZCwa21h&Rb*1*FRK8GL<&g0GlSr;JdX`{%^xLl&8K z*QDO1tl)Odm20(N-VtVo*bW1B6TAmMne8#l{aSfO zZ~dm{0LN^j{meA9oi8;-{;Gq#quPH`&8r0YB#9lujlVJX(PVAa(!1ng`~-m{hr96W z!A4)fiG))@25cBpmGxv(=_EGK&&Ll1Z>U{*I7gQ9VlhUK|3F3s(nVyR#W%|B=y|p~ zlkih!^#Jt^J_3SFS!tl&BqGjp>3VM7Afk;>y>2@d>I)H-FKkQ~R8LV~^k($DUrq-O z&5YSUw3D#r0+9pi5}ltH_goBR!6>C}nIKNU?f7RosIJQqKl<#0RFzcM&z!BP-NFo? 
z`D(N06~5Vik1e5US78&V6pzw%A>R*=2lhM{onQZiKfs%456*C?U^x2!BL8Bt#D*;8hfJOyE|4wgAkK&u4$0xFBp z=Gm{!TmX>A=!`)$NY&UqOkY1u1Rb zO2`(&`PvVQmW-MMmk)l$cGqwDQ)(_gBHhM)9Ow-6GiOMO32gs7R1FdvEU(g*@9c() zQW?`ua&o^0#%aRizf(QMDaZWDAWas`a;}THnbJbK4GLyo5nw_=Z;a#h%<#?JDoR`< zTWVB3V-zk`CRC53=fhb0NTi^BSs}l6eYn&Db;W`^;T6$c(L$EOXT)d*yXCe)`0O|} zuC`L846%ovmv2|wJqgSgF%m}tX8FIYhP(P?E9c6YFN)WuBF+xpdXjH7W5b|zgI7SV zsxgEPVLDkML>=P|Vj0#Ex5*HYgl1YadcgZDh_rOkt1HfFR#Zjp%k(}m^!Scf!S+>% zcaB2vvF5o2SJ6bS``MuK#|K;f411$dK{~1ObDVE#7ix3doRC;fn1V@)s8gIYlHKT3 z-h3R@@%o`KS;;4>vcJD$Y}PB&lDeSne;LDPv!|ScU|>5YD&THg{pAvQK||E|k}7^c zq^mo*enW5CA19s)$a`Zv_I=K;DlE>Px-ARhKP}uH=Q0wb{B;(=5Xzh}e!3@ zTDf6#jFay)k(afPpa`CB`_gXhI#C%l zyC)z%U{jkWq?5v8$1+K9B$#o6gKyq>Wb5IPS9#lawh?@>RYk!QtSc`ce%y$;)?1UZ zArE5%cf3klhgBM+39W8wQX$zw;#qZH?M*BA@ycR-eXiYADyB%sC)@5gNQeBa8%qkC zc9y&KyslpfSk??>-q=PrHKBGWcJ!tTeF+}=2pbrF1#fk(&1`XS>JD*WZp?}kRAJKevwpD<#e!@ z-08(FuE}chh@Rvcgll=%ALZ9|SW4AR>vBbSU>tz8?SR!##z)AH!(YQ~dura$m`y|y zEk!>c+0F4B(Ag&cMh>J)TVDWe&gmwcic%}t$$9*K^okwTRconY%`3B^)?R|q#6g<& z@y<7I;_rs@r0sitUV(E4y*WTqmbsC~;&!}qctD$dKR55JHIed{7US7E@(ySsO6;PmZ%7q_8E>6}T3 zNGD65UY37MGtNSP<}Ubr#;-ym%!1mX#n=@eo7kO_s9=6TWoo$&(1HM57tg+2Oroqf z=z&1svV$7Wv$=zdxFcUZ8@_#Ye?AC zm^hqJkZ@$kKFv{lY{+MtBbP3U+CUtd0!=LkA;H*qWChPI{Cxco=;;D7FSr*g2J#d? 
z5SLyp1au0()~6>3GT~(vl=D;^m|{J+aGC6FTue8>y^aq=R83DSZ2e}`f*^^q*mh#) z9ui2ozEAq5<5u524oUK%Qq*RmiHLCSWpeJ^a_p@H*8M5fEUAcl2Xv@!xGkuHA5rW$ zSLtW~3~C=A?yHEf>Z9+B3qR~YS-n%GB48ByeBC^3q6S<~G@lVSD;{tM#U-eurzMbD zB#T2B`Y~dCWRZHYx{ca6HIb%LL?0=H4lvcu0V@v2jY`T>mKTiZ2d+6nlp_)T(n*}O zi>n-C%U%hHO4f|BXx4}iB;yr292p_$u3O9)Trx<8!*btmnv8~-@@a#7kMR3qhHy@Oz{iwpjKQw zO_MmV*P_jXyly`ey3M0iS_Bf0Xy7>>&huinMurIX3m_6R+*SMu}H`qbs6tmY}@X_6ye zzB=PioqJ<)26R`)VqSA|L@khzFRDbjwXSj=xPy(>PipxxR_qQ~?#nka4Br^^vgbQa z`ctwpM;b?QNetD2egqj9F}1S6=h&OySSX&ON#;gu1V`BX1&_(d$}^qTg(4|E|_M|L-^B*1^@#p{tLc;if4AEiqB&8th zcT|JQz8YwKdD^N&G}OXazurt!zg}|WwSF;<*CpxK7TxVU(iA(Cjd2$_C=&hF06bj($=RnG70{MwhRW=z2# z3U)8EQA-HutT>*+_K+JvLuSDP!MovKGRt^pvMBQq^r%P44NGL}OX?TtYx>nmk?&Q0 zi9Z3tt1ERa3s!}P3$g;fHA5%QJjk2%ZPQPOYaZ2F#S0Zit`r9$P?W2#_ zw_I4WENc`D(6o3aoC?@Z2<~CR-JeTHu6qZlng+Cnsl~qaRF1`iR3M(}og%I7*}69G z-A6VeJl+N=LC1|za%nQ#QRliqx$3)p%jrXE5A13yk2e%AN6(dj=FWHM?~+z(MKnPg zX7)2exBrD`E~kbsseSmAc3TTVj%CQ=2@*=XiQ15Z7FDvs%7G;Ft}oHW&wX=uk6%vI z@^Dbrhy{i%Ug}VIYEaO`RJ2_MUi5$ug$8VOK|g^W3$we zlyMQjGK>{#_)fjgA`5m;U7mhiZyE`sgUR#+bQn1zril&{qrD&Rnf%%&V3TzVGn={T z|H=kr2`KbP_$wkcK#2bVrX*fIpHZgQv-suHDVrMw{i}W69A9nvA66JIV7uo9Ha^gy z95B57y!<-JvAk~O2}Xgn-24yoni&pe_R{X3JNgSxYPd`o%DNC);rBr$902a&lNEs$=j$%7bvogH0cI+y_@hcANsjkaETMQZ zTduNoLBBjav2!x%(o)FH~g5OXv%NEmIuaQB2kp%l8+N=%kP#X?eUf_|8C4FtpNiz<=Y2?r9wW*m*WqXJ>h)}@!W$ucN?0qUte5BHbxKP`{n}xY|Nen!XXhg5di^^Vq zlr$Z5pMY=Px5Bak^WPq!2RLbys=o{GBnKeEG2*AoT!wnfSL&FucF>gWoZCk3Q_Ovz zklJ(Arr;s2f?8$JF*(qMe|62NVgR>P$>cj)upYHfOIT0}28H&C1m+w$uBj9Nw;5N* zpAb{m+m=kUPO`Dm?gNXN&Ue1FD2f1o($n?q6e8e44ktR(G)clu%3h`OAP6(>DdNs#n z;-p9Tl@aDkGs|OM`?fu0RV=>|#Isw`5qg^#QW1*1Oe~m3#GBxtT)ugTp!+vQZS|Oc z-YO)9W%y~!@)C^t6E!+LhmAy|bwvGV>B)TM>igwjG4~9UJ`Tr>ckKEV9$cp{OFxoE zWxG=AujU+0i|hFv7BM;x^F=Db4Rvc!M?l3q{b!T6A?B)Y!TOYHk10-w&R%w4DbH`d z+|*%V4Qo7jmcK?uHNS435dZ3yqfL8KOg&G~NDmSD6OksT;#MQuRr3_3k5|zugz6f1W!= z&WW-KYgKW@^`&5srWUJqqlYNJMh_Xt$*K@o|Bm{HTj>425}E&X2*7qlNq5AWH_$RO zy=5TYwXbHqw%^n0RgG)ssSPDwq4r(h@%~F4^}KT-X{)xc0b|GWFYmhs9AAlfSJBv! 
zKHP=LE5a*{Eo|(Ugs}s>Rx3qGJ6?0hi4N*psV{Ts->$H(tuX{j==6_I<^SshtZf(#9mk;>=!+$`?yLfW-pih9~0>ZG|W8@-ZiZ-2bv)F}E z2uaZ-(itG@pE~?RtEE!D1m=h~kfq1%56G~PopZ_e%diAKQvZ|RI zKFC$6jATv$Gg&ogw z%4VTXcutxD(5L(VM+b9Mi+yv!Y7{4a{V8lB({*h@KbizP?un$2B>Y2V* zYLh`}6HHtH@-bxq*I}h~tiSFhO)z_Bc-a&|59WJF2Hcg}>yx5=9PYBLP$3IElq${f zhkaBgk~B{^L$`3F=0rYy2e9V^lR-SCK5#254*|r5iPS3sadtaDLLi;ZSEI6gTFtE0 zOV;#w2X;L|)Fmpc4*Oi82Ha|sla#g}G#M4XSq7)aL^QD0@Tg5s7U@#hPF&MwCejvB zsW;ZHG0pC$y^2!k6dzt-aU2{VV%`|_SYhX_5i;M-f|aeD+Eutw$~Q8U`%J7sTG_T_ zL#x?-S!Y)7iQvVe!ezSsy4 z)@YRUEi7yn`RFnH5zxmMdXe6t$u!LXR(#bQI_g%>KEYBJN{LR@&s|qG?gM|huDi9t z33)GS%_X*pUnqY)TDxFk*J`TSoqK<2;sU0(z;(byu|A!M8DDlpXv{&_1{Vz>N8v6i z;YlEGt+8Pjun-7bpZO}Riii538_N_`kBdDkw2F;FQl75mX!O@F*pY;Vj|1Y@gaY3_ zW2{c8s?yzWHIbj5qjn!7H0487{IGa_OSb8vXJw&W=WL!h?(r+NzO_nO9F$SjQtjwP zB6jt&yc-OG-)uWVMKWRU; z)ZR;rx6$?omt>)@-^p-bPwtM%8>*6uuM=n6BtrFheAuAn)3+L2k3xe z?w2c$2`|+6qv3t@jMJ0VkV7K57X6D5T76Fp#Qa}J*dUe5{{-e1V_a@^n{H#YX*Hhb z@ph=^eA3P!=DK9d;gYy8nZ{>(sJmf?g5Dm&?>4kAj?RM(Ke$<&62C@Ti^(zEv~3ss#(}L$&(E>XTjivSn&^ zRVcWC%>=;kUHt!L?0Zaj9--;yw&kObO(yMcOUY0X@R#CAor^7*MA@aIMukd8bWlOF*(cx3lKg0?YZ`p^+MX_;s5~ZRf>a!4qlAhHV>g%*uWSAnaIRw%unZTDT{ZWXO+()S2-5@iNrdtRUbbMeP4d1hOc@S z--7b-k=eYNsAcSTF|^y^H~!S7AUv7>s__#Yp(+m$2?FO-$=^vH!y_C4Q`-d-qh`r7 zN-j!~evgC9z|)qF7mET@lgMt(-;JzaQ9n{A3>g!TWb0^msp!o8bJEbq|y zs&)Vm|LDZbnAvFMHQ74}rfSWCNwWje zm_Z?*_$tMJybW<+Tgwtz8k6a0^xYjbkwMkzi#1#>H#&1Zsa@)20;-(Sf0a1~MwqCw zAaQ$xpvvh%EaKIez)Ua`_x~s|kL^r}A2c9%BZx6b@aQ&&j&Be@*5tvO7gb&`;>^4G zDG4~_-$91~I2jqXdT(NmT!v?n4j|+*_%y2h_r~LQe=KFDzKu8;f9l21d9k5g6MyXnb0Q)k7~!}(Cy>3 z$~R5}hwk;UPmuir|4X3@p%G17Uo>#?A*5nR)R+Y|DQ=cS2Wk9x&k__2sLJD;gx}26 zSnau=A3`N2G)5(D_Ly7Qc?zi{h8_)OVn)f&uZY2!%{DO-B)fKlr9ZXfna89B_~tz- z04|QEgKZ)REK^_O7CtFIe{0$n#Ao|E{at<_<9OenNFme4^pJ}_cGODNF@d{Wp1@X( z?A;t|rL^SM6+n8=4qSz{V+9+(B#)Yp3@0-`iVwv+7s$n(P{-};0E6;C<=*}#b#W*} zTi*S(O=s&x;2mlajp|DwVeoG{?TWq>YY0*`YnRnE9nQbVO8c)5Lhq)wu8G4Ev7mn9 zlwQA^_33&;(AWrZu;jL=71kk6e~jBHt@xL$Ui0Fh0MhHh3TW}-$-FZA+^r8-AMyAa 
zGXBEN2eRkrixvoaWS_iE8|cliQxIxKAnYA$8adoeWjNWY+k`qe&DN-q!wzfr$0kM_ z#urFdgRfHUb6Cm<*l9*uxLrHB_73lWOFfU>k_}rUx8V*a^~EyMP+9woLqL(A^+9|r znf~%g!hiW4gWQF@1nPIBP!Bx2uY;7>SRg!9W-7UyXkNQ{|Er5%p#gofD+{;#h)ZEkj6t$@Ek_k9GVfFgbBO-kOlipdtkl%S$qyxElJq&S*K zxU}MG->Xg&ljBG2)gn2N;CtBtK*v<-)NE7Tv&iyj?TEfuysao3KMH9l^Nr4b4D4rGRPsOINM(9 zq-c&ovHeWWc6#~#jW>`ZVcsfkTOTon3UH$i`KABy!24dt7ma&2Sek4HQdsZ_`s@B= zKE|D!_%NvEA01pcg~m^%dmf3o(V+$P8?rplJ{%rp(@6NCY2`#Fm_6M^H>KcDfYZt9+6`)1b zfR3KhY>z$XC43d>9Z;C&zlUmCbO%zIoyqZd3)#Tf&HWwk@%LeWccLY0Sz)FM%eoo!AS?0y{z3hF)id7}cIL+3~ zz@Sv->7q3EfMaGNaQ_PYHe3x$3X~k?*zk=$G4S53FOnB<2P~Ut-&wKBObY#I@Tsw` zKEE?H2`lbyW<**DoBzgEXWU>G67$_`8gmy2YFB51O1)xO zp>^==$>g=Z>cd4SZU8>NEQf+LjRg`RNR}EUG9)LGf?=yBSm$OqQ;Ow(HO$x0%IE@x zs&nul1dUl3wsa^@TD9RgMQT+@Ao`oj^>z*^PplawjXOC!kEU%Yf{=M^vgr3zL=Wyn zshwbjS`PuQm5G*+C*!hx8k!$S9he~+(V-Q+LCz6OnpstzYeG_ZkTW>9B#`9d{_dzy zMdC@gM+>rJ>WmUz_*2fJEbF{JJiKI=4okevs4!fsLGmvRLTw>D!SkKE%v>k=H zG}}ynT5GWzVPetLU`U_*3{{2U#()lxDdfD96aA3UwMMRTczQ4n7+f=@ws2F@qm0lL z{*{Mul%0Df+RDCVP%u`+Ve>IxKMQ53+Dl{(wNqs0!ai`!<4j+;m0 zKB~^rLpY;W#A_j)T=F}S9WWdD79=*d_|%I}8o-(O=h&m@J4fq>;N@E(`7hXJa49(N z=uznDod-+} zBgJ2W+h3XBy~B6r=}2{v`q89w4TZaz!8p=*;Z28;^?x+;9Slz5R>&f`Dmcyy2YFwb^YpN*YY7+1m=eZD5kt_I=X(j> z8gl{RAi*H6UaQ4znuvus`pOmXYn}1A?V7V86lgcw1(`PeY1I_wi<0s&u8 zM0SxQ7zK|mh+E#8Ya>x;rcl&gygPN0vuFRi;yZpuBftK$-d2 zoq!O;xeeL!7WOPDM?t86`Wh>ftxglT(Q|K?fW;jGYi=X=w~t=CL=6W%@0)p*c$xTB)Am8G@>v_9u=0c3m9%2Lrg1mp^GtCu8HOcW>nd*(qOY_)1l) zzt9RX93M2n07&eau4ym=Sg=&6BoA_bBKwO!+$wbyEXiovWj2SzoS!;s zT*M<~lR9IMqCPT7qk?v3^G~55%we*bG3}|9o)=ZocO86elo%@4M|Ukx1UpIgEN<|v zbQ$i#ooK@?{Dh*u#)w6O6N;)jVldTUmX4fyKLu(4K*;$%gjK|H$dSF01P2AWiX}1e zR|{j{p*J?50nlq_LAtMDFfI!`G;$rUwmTLWNy1 zk3Hdq!acop&hKE56|1ICP;3~$s0M&n51v|I^lv|3uTvjCU#FX>C(JdfkMVmAYZ2v0vv{FYl(5NXkwkN3BMz_Wz4IvpZF#J&jF9n78D(&tMkn4 z2$pm6D>{QqNOY)1^pZ}GGYG7K=w0P{`!-Rc{xe3-q&Ga7oW z>8$g9O<7cbzTj5NZNOaHY-Oy*8|s1N=_K2hC2b))QS<1MXrsP>)j(8P-aUTA>2)~4 
zbd4fxWGyr28+!#aLHAwWW7o-2yIx%pcC2WwNL8B8Hv~wn%n5IV$o+pnn{g%agDT!4;D^cry?NVmRo02~`GfHN(H!VTm)Ufv`A5}5+NH@~l zByzSfxYs+ty=C)_#tL!nnCXONQ6fI9QY*%0d}13m;~GlI!s$#W(BS%A8Qjfpd%D^^ z6#po?nB@9s;qNK;=w{?s+!y7`K5ok5ZFB{|PG35=IdBCmqD>_6rB>BnHsYX)sGvAV z`$*RQK^Vt+w3G$ySH#`^t2=l){(TH)%45$I>F1;6}ODNpI!j&w$( z)>3m+g84YorA4dsS~wJ9%3nki0c*k%CR_C!VreYpl}NzXPi>ZzS}1rqhFDPw&jY9W z!}P5xtQ@K!v~C{{fpZg8yIRYD^hyv~5Uf`1+Ow=GEA^6KikngGF$hV8zrp2y{&GiH zC-PplSy(04iF*Y^0I;hi^gRB)A zZiW@}9P41p3H=JEOZ8{4v}f@?;O&euQS(qL%vxvM{_VL{vlyKPgIlUz?{oBYZmH@v z$xrWMf?z8@o4!9rOeA+Xf1$Y9whf~IhvQaf;7sy8Q3&jzjb^N{J)KPiJKBW`CZUW0 z#`Wl_U~UQ1uM#%_9itim9?AoFm~Lg08PzYCB@|8_$($W^t>8n>S%8^JFISd9f z@x*XbjG7@?a*oDEe8ggW5n>!bBjyfwJK|Npc`+_7SRh>XjF1$IzcZZvdt$@jS@c~3 zjGNUtOx@x^l*pNTK5IZIJa;^Q(9BWvJ@vJY$x{dch_ai_)z=n`MyFTcV=!IB*J}BQ{Z=?#&K3uz*%~ zV6fq(Y8C~4jRoY@NZbrlbMot$4JAoLb&{n~ctYh_<{YQ8xFCIj{=ut1fdCeptaSOJ z4_NTY^-g<4N4{K98TS~Qcs`X{rJwfYXA`82NXLW}@nj8}On$G&TMwUM+^TKl?$%g- zP`5oE)dy##Ujj-WXyV^n&n=(M_&;+oqos$J4?kzVnFrl>a78kSoL_Qm6{da^4a4~{>iQZG zbWp&Wh9AXYRC3dnlcU2B&X5rkI>(JrOJ5~|f@{fHo2SvfKPCYBEc+74aKP7ZsONV+ zfY{UInF5sPWx@n(!njlA8XWk0_H89nhT+rzsUNV|vjyF5*WNiFMbzjjg}TT$jX5(> z5N}v1d87K)asKw@{_Rw#2*CUs>$dIqn3I8-Q`>a$d;G(~bAm8-xUKHKKpOJib3UtV z`m;bw?*#BnR;O&a^RZ~2tEXVzGF3A2jlSg$0{gGz^uwz- z#Kn1xzu*&8;UG7Dm?VWWQ@;MZVvK1nHTGeARk|fJwSkGLpO@cCQx2=D1BBb`RrsoP z_y>y_)QaIL!KqQ$TqVu|e!JNz%(aW|zsA}j6{>{zVulc={)$v>cCsR~2J7Wd5Up&g zpf)RH5{l)8t5PRgl+fQl?Viw{td^a$H^X!$`HVeiu+9UX2Cw?(Vqfr6#vB}eGVIsIqDal`Z|!ZA9u$B%AtE9AIO{{k76>LTT91~sKrvU454sn+VF=@-`dt#)}v28PP& z%h6yjVIM5M4;{8YZg{)vYKUx5)NfB^*9e~U7@&Z92WAN-R2zz{D8rWBMugO(La$SR zZac3cJc75v{=u!w9)*A%aFwt%j1V+kMkJ+F$~(V7lf{D&+FjIP;@Osm!a`FFnr(=W zKD9e&r64MVN&;@73j#qaX$D^0N3K;~8T>-Wr zL`UgT^LNK2yr0ZU!Ys)_H(V4vH$M{K#BE9>Lwx$tY9u^OeLl*J-irQCQ-ePW&yg3FyBb?k4o z{^7&PWiH93Q#ndwzDq)j3>cO^X&Z_<`OMY+DwQQTQZzf%>8=@Hp3e<2Upik^o@Ve- zF$?1HX^H_(RQ2V^i+^yjJ}c%&p1!ab%}OU3cLp z=DyCr&&sn%w4ɒqNOo$FoA#+b30SC^+W>i0W6D&GDRcz0ga8EGG1%A`H1M#VRT 
z-^fvx+iEoOB_pB70Ppj$S}^%2bWXl_=QSC%z{cdP17OyMH^a&1Qn8N5>$owKW&Y0S z(32~w-S*en__OA}90H4elM9&4+SAS6*;L$WX!&Cm#;1qI{AT=t{cJ%}oW^Z&cTe9; zOzg0B>5FzkF8n{KR150Nc8TR`SCZg^R%UoA)D)~Q@3S*aOZ);quF~=B>CPHUqmEAL z9wV~l%dWa@7Z2|GZ2^|O$wM*>=`V??k!&K&3*9;_8Ko5DA4!+hvtQY#bM6Z#&NeG8 zPu@g3byiM0U!JX*U)_xH#Q+U6^YaD|^EVB3+L+MI{m8j;!~JFBtMUAFf@x^K=CY%# z6c#imF86~!A2w0N!OY1@GRjepfWxn_Jat2XAUtfNFL?Y)8l2bucXxI=log7$UAJm9 z8~t9j-_JLTM)DL>8teok{9uPl3vdi+ft$XW7(m#Q!@uLZ;Qfs1n=o86Y#VCkp$cV7z zBeDI$Pt|%_<`GZ)1%fe4=UbK_a#F5F101KfBQ}WTAF`oDiFJiuMFYONWMS7K%fKbo z&&wG~sJLLY&JQ^7f#y5amrp*59{(bi`TOaafjh@YG-60)cv3NlUR?AECaYigvEv&+ zfND{nG?vG{2{qTB_v~pS-6Hg*B_t8SDOIG_;NQ%}5^}WZSgdtcznjg$Fh7&7;aC^~UiC@g?d4JDkFEm*c^cNJ zTCcv&LUOF*IRAlA-BcEEpfhHemR5D!>hY~&FZ3Bax!1TkZUL|ZO9*J76%DSWr~=L= zj0PYr&f9&wKx%vXT^OeL_Z&X4?x;K}l#d04jA$i4fD1s@JZEFGBrvO zS>A6{o_(TVT~9kK#H)^d3|h=JAyvE@qt2$B(J-R8c!f8k3ZFAygR51>NlId3(lqXv zOrH}NY#(c3ac&sk&$B}Sm&N!iBfp_O3Tk@Bwy<9Yf9Hb>=E|6^NyoLy)n%rSF*STweZ}*-zIk!1S$&&$+UW(^ z8Xno-HW0&%@_fd><5( zsie6dLVl4#>?&NzY5Ud#xfg4ONuEW8{YJFff=Rv#_51>QQ}6GzA;=SZYgWwu`4|6> z?Q12cyojbo4w|^@4PNhjLj=rsgbQb_7Tk*O?DHa)o z<1Td~`6G9sF;M?>o*R=4fU3m5KJf7-H_1VEV{gc8x!T}8a99_+)@`I23Z~av-Ek={ z4^Hm?O#%^lOqIvF6SRrQMEqw;zSQ2kHUwocMRr;Q&%|7Fd#$>WG<#%MW?j~U$_Wl9 zv-e)$%_d$<8=caXTy$SL=?a_$AS%{5i+itE?G1%`x8i`RRYS29K-8Bvm0hycwxmJ(1f&1Mt!}rt!Ne$C^kvIRbF|hKd zJ(OADwK?L2mjmhW>Db2{zoV5``^6IZL6t^r_QvQ?VT8WZy2Q++m{1K^xR9$Bvlq({ z5@upy;^{h#&2J!JeQyU)HLvk8C6}ZLvbLDku@6g%EF%Z4DP$$5u`1z-U=8yWCx8{v z!xIu;4Pzg`U{QU?LTc`rmwK!K4sE%!%y3)uRU(CP(mr@yZPnhun`7H7O1lbc{@zcN zDEo@f^3+3i%A+xyjIp8wE-}N0mpc0LD1OoqZ3v_W61RwboEA0d^-Fi^fAylN{jagz z(mcNl@f8ycru{$@@(>o(>2qCFXcqpY$TO~jmxgqmj2$ihFI?@4m*Q%fjof~IpLw1* zRaP8&j6CKGtOa%EsP0M>Q{^K7$QpF`)#17i%BO-0*(i}9Jy2vZw*wRcpn4NA-yED& z#ty4-oP7gkRK(8l0(-JGAj0k z8O!GUkGMsQ(Q%h}-R7l=j%VMdKpJ2E$-nBLn6ckmy41?UBd9B)y78=f^YUyNkvbvn zcg$kQz!=s~Lf-XgIsh`Om2Qt`GshhgpL%IiWCyZ&Y;m?6cS>;lTJ0C{nCb!U9jVmr zNcSoWaKQ7;Zh-+0VIwL}?<|mdIW}X1D}o~m_@u1Bu+71)LguM!OL+F)8e@GKN;V5L 
zwsDeLG+%_id8|C_m8=UKI;Q7o$0{-pS?wWvnd7?X6ZW%;Z+CT5-eHb3**8CZ7`Ew09b65beLPCk~)S|d&=JFQ=hCPi# zD(gq+VlplRdg+9Y>nqdW-nXl{i14BvYp%;(lfW)obV5>xmri{;ls9)MM~+5qroCT; z3G`&Z7PY!zu^HH|tX*#j(+JDdIY7+p^*-bD?^<>z{M<9q*2yWmebUZR@vdrEybXGG z^>}g-)olILYFi14osdsamN5N3%_CmMLKs|JJxJ!(>EsEUBe|rCDFLd3OF#KkZ*@vv zM8$8z$8qT^!J?^&^%udjWe_-@W!uhT&x>Ke)cIr-R@E9b?O}pY_vV$o23&Oy=s0hG znc(v=$XEM0$5gb_S)eu-J(7P-tX8_Af*0WZX0ZlcTm}vX*T8n!^x0WqV_ z0K3!0@&3U|!{|^dL|syDNJ-nXRmcmkX2p3K;qA}xF0M_VOvmW)Xgm)aK9H;Gj}n#&^<% z_KbG2fdcq(_v4vt2`JruN&?;nAIw-S7%h{b+JYqj*|w98=;P@P3<+Kw1#AR zZD{%fsz4$4sPP^Efdox_*1^#4%sQE&bSQfko98*6yBlLh&#+Bs(O!s{Bg&942Us2H zlK^b^ymRkrVoT#WjU^e1+Ssli)j_XMBX+^B?Dfb0ZJ}{PG-5(O$=x1J+ns`D6?8@Y zU1l*y2FU0xtLJw+If2AjV%`KbEO^gW4$)^x!TZA+`meR8pcqBG@SJ*3hFqX0n5X;c z%-&MBh$_F8YT<*eV`f~YLXu+%vW+OJRM|i?iYznh5xg6)(a|a4b9i86nerV7A*y1z zM3`V`oCWqFKasP15-j`5L;~NiKXkI<740Zc2FmJGf#zblkF4eocut!>FSB<+mkd?y z?*18TS{eHyg!rb+v$p~Vq5T!$F~}qdYq>;H6xysWg|@U((VWJ<}-VZml;`%)b%t z6ZaBz#MAw-^ZlhsD$6x}#=h@Yn3M6zH6mqFd2fR5f>sCtPYtm?;oHGpBp>;pf8yJR zMFdRE@-3&$uM=Wg5G2FKbErMK{gcJZb5xh)tz>0Uw2fbfP*B7vj}K$e2WNgi`(LTlD*pzK^-g|# z*3b3yp3uxm-csG*bjOH%=&u1Sxwzl>QW)QWF=d{AkD8rdQ^-|(2w`GDsgP<^_1!FQ ztqR!0S2x1*UtLkf^1~lkyX_-mdHe<2Y1tt?746S-e!aW>z`kx1UQ?q>9#-UMDu)P>cw}O( zBy11AEOlz3l)biVSaoU}zlVFT2?CpfQonAs`85)uwiE5n1R~5YkzeoAHh;RE_a$Pu zK8C;xi2hHmB(`5-y8H)Jcz!#w+85njiyGIn~I29MI^Wieb z@l^qAd=y<)Dl4>RCHRY?VnY|_XW^D0v3=i9dRH3zp?T_-6z)5tKGf~~+(85u3|&kh z2Z!XZ+mC=-$>Zx`h`ZVhX;Ojqt@nZTv4@4c1%UIz6T+sdW1i>z?*3ZM=HlSGUYi_~5a?S($f8)5X)vsq;U%qwvzMZ%f+?tpZM_SKrJYOyVSYsPT z)KYvlx$?e{rn`~&q)Ol=s4pkQ!1Y_lAh0J~1y8oDONbfkv0)``k-C5hbyS*nYG27) z%H{jUYV;r)?|DGr20Y{zi%lc1}uI1SfNgqGZUT737<`JWc}w0dqa7 zu*e&P5AR&`$ug8eIKfETy_~8JzzU^R<76nb)T8zDGJRH3cypEimkOljS*=HeGJGje zv-{td+eF1Y6s1W0161${DzpPbiJR)iAaBvCf%lAlAwn9jg<0RcaU*kFeh03nL`F!z zCu4?C6YDtd)H|zsq$6Xdpz{|pF1^NjTRsqW1Jl(Xl<9k$3}|aMVk9eQnOa~De3Rl1 zUddcL6h7Pt4SLP^ZOf2sL;)F3R!Wy}ni|0fKX|!SXxD|3b|rN~vyD$ALjiMyN;LYa zA}SR47!#ZuE<|a{6WR9$Tr6CpN_fMQw9&%z2h2Bvq62D$Kf6w-i59AxC~l&Cb3k9i 
zNm_VgSZBMTM5oSFTrr_td-;p90>2^nblCa4b}3wcL$^cB5$9>*HB;|jfFBPNh5DL48vURrOG>T9IB`jYO(te1lKcFb0!hZV z2S|rHP2-^E9cfXMhBF{9(fGezH|~GCZfE%J1}4Kfj~CgG41JvgAua7i!KQ}p{avf( zB@y!lbt~K+1JXHdetc@gk2S=+KXj3QDor)#pp5KY4M`uWbfrimBgTpfY}S@1V23C`XX!+4w{0OWOEfY|@Mz^{f)41bw7cVT+D&+H@!GQxh&s zgP+BuQYX_hC-mN~L29EIcT+O?&UH_GjWvwXUR{o8rz=AeidoKn+F7nCPPf0!(`4?P zExO=y+coZ2-Q%da%FLWrW=^UKh>AHwGPr8nwaB-oNfU@aSA3la5GTe`z&vVTz!~Rl zRKeT0owHfH`=iiQ332hm>^j?;VrCWF4(=<+0Q+vSRQ1oLxDi0>pWfFmCMwHONJRJS z%pZ@bKp8yC47Ggg0|SJui7!mZ`u~Y-sU<)!1KBx6ExTN`5LJGNWzs4yTDJXD1o19S zE>|4|i(QM2pRg3ZhqW>$G`IcYd);ePl^?&OMifcEjXGr0dk2f}?59j=VC$(e zq_XwczcRf+um5NgQWazSs%gw93LDukSH0w@rTLk#SJ@66sNIFVPd~SJczsJU98Utt zRLWpvSFfh&V*)wmvb{$aC+n0!yh_S!tPyNEul$?o z13~Qbb^gI?ogzUP(Wn`HBE~ho&y;_is8CMb)gaw!m9}<2KYKbJe-)%mK=l+SuZX(> zugRTk<+m=)4jaL^gFked9it0p_+TP=zg36n;TJeM{(&R1fAr#v&wX;V)kJ$f>cLk5}T#}h#IZ(*g8D4hjdxMuS2@w z`uEe#^QQ@Q;Rip=j$4<0qes~z-(98X#lt4muZuoY5aRKdAg!To&cL7022`YzRmh9W z<}G^q3}Lau{)!#@;7UKC7#&}!Ac+7=awR{(WKg(PoKEGey&I}kNa~;wQi6*v9dXjbcdYb@!7Y;xm-pu823_xEBC{Zfxx98o zV?-6t|GI|m$5W!!NdXmyH^A2bB*4HM4p5nm$!YDNPD!3Gd=E{8ulE5u%}})%0$EG*JL#VNIlvl>oQ9@`N@Rh;PG0 z_ukkf?6^v8CHeYau1<}438s2?IFn~cU{2;bJJO4O>~RkS>fxMH<-NVMr(9c+koCXZ zDEr;bDd<3a%`>Ee5vp(pi?=QHV&TPKh2Fsg0jYhU8yq?%qG49XTSY|b_YfmGF3f8$ z&s;J8HYx6uPHG-w~Rd@2eOf(H-1^*n{V zzbo%c-p6cQCkbRm4jRLi)f3Hx`kAR2e9Wp|I2KvzOkcd?hrJy_ zC*}W6bXemas(}X&T$1R-;P%T*J=T2vLbR;0ns&?k^0r)WTE7O<#50yLZy->$6UGbC zhVp(O{g>=&8}#oEMUlGO5Mvw?F>7L7!OyGkObk3n);S~yR)4evGS!gpwS@@gwmGkR z0y;-oBkH%2q9ZfejiuX0Hpt=BH+T1cq^gT6uD6d~(Sn7r^j%HUE`d!X?Nm9`b=D?| zPB_2{SUY}_2DAZ}m$e#%{I#gmbi?2Lt2jfdgQ+MurmEtXrmCdKi$+-%RV5vST#`=X z6ZEFQ$Rz{PA!@(FaOBvgMLCev3;MVq%CDu<2bC>*2r-nt=5z19`adCF?hgtpG8FDL zoONF%54*sgEfxCBN-%_tU|;y#?$hHVmJUU^9fHh#?!YkJV5}^w&k~*_flZ&=yJpk^ zFJ9FpicQ?b!f{zUX0DdNcgd9Q2w9&_MJ>xFSPC?j_34pv+8jFGdllpYB?7DLjH+?PMF--fC^M^mnGw3w+A~uXB-kybU3Tqbq%WuUzZDgtiqlrU zh2Y|UB`l6%Gpyk469o!KxZC#Baum)K)=;ThL8J-!>c=GbW=6pp`mpAMNhwbT572Qb z;>jQOZF;~OW3wrZt{^+yZkk15_qkHZh(;-UtyvK9c7)TNb+fo$TtS^LND(1^Kl4^# 
z{wSNWTJS(yvjPKn)LZOcOXxg4Ilde*(gxKm-GZ7lt?C1@1z+uAJ)dc@xJxK&-U z5+6cryDC&g6%7OdDuoGy9r1#b3pHBrBC47oovKa!%KLZ4QoY?8*;?wkJ5LtNCa2;% z=-OR6xBri=uMDfI>)KViTTtnc?vU>8mhKWM>28n^*mQS;gdj+FhalY{NK1F0iO>6d z@A=Mket7M@*n7L?oNLT6N8IBc7HLu+eZy{bG74>WewVQA&L0qJtJH>Xs9iO>p|zbp zh9p}|3p_z?g4 z?{g0hn#cPNNF2ZzF>ml0-4JN8^Q+Wjpzch~1x!# zp}0x04st35m4*lV!d-?v!0s%`o-qkkqEeX6C(!L%WBQ46^19~S!-@+h*U)$bKRvxo zXCdON!f>by01KVob^FXz?cwt?&bk-+L^AoRV8AZOr$`LJUWztW1gu^*e|5r%XrgkY zJYT*Ixf(&M-Jb|>qb5{qHfYWvVOsxA$RoGk?eWCJouq8K^bvc9yMdld4P?PD@xB-f zE?WvWPp(jXRO~XZ%v+<*P@fYVe;EYGpsV#2j5xHYwb1aw6UTq04cBkV%dcQEenw5W z-)fQkT<5+?P!1FGVqMRbuu#6JV8OMxINVt8gWNaDLizNUdwEIp1v{xP%H~G}OsG#w zk%Os{k8_LaZbV4|7{kAo2w`vi4Sg<2zp6 zz60ei+zTK$f+;r-Q0mH5zGhuG^Mvyuuqn7)tD;pDD1K@o;#^DqhR;L@aPEWO&zGbY{)%=+)#{7hVD{}Q%%Sn;was#QUoWahx|*aL8Etnox2{M27E zq6hrc>o5h~03bol=E?eFIE77K46XZ?9BHi?`##F6%)Ayer0Ut?1R~rypqN9TT+4_4 zWZ-+x0e}gz60Zj7r&nT^4{1p~Up|*;cSZ=3#pw<}?!P7})g<(JWVS~S*{Y@3*xo5C z{dU7m1(E>`=+Ebo5&uJz8{7`sCSUoe$bB=DAi#gcjBOFON-1H9W+1#* zuNHf)r>CMHNS--F;tC+N1)*Jz1Zmc`m`zDX>F^y&uE~lzQ_IYw!O}WSE3my;ONTGr zjp=hT#(&kFFFIzznnZWmo_=E+EV|)Mo0i_TD;{^voSlwQdJQ#K? 
zzS9fg?zH2EXz+{Y+@;No<6<3qooYmzdN-cvgP&M=Hurwjg9sP^19Sd@j~LIFHle)% ziOWx{1qtoC6}sG=T+e)XEYVHiFDdUi$=N0ksa}rGdYL*ib41=|9z;_i1kL(EuVpVM zA(Fn^HT#+coU51q-4ftj2#a)kzY+ZHtK}OJ|8|YH=o6kPVW;aXSE8FW zPL!#@sWw(Z2rX2o>C$;Q6EKnKe8bVl(!>P}5XYMjmUkv&GHI^_`3H|gH!1^W)hn6C z{p2(hqEmzn`N)R%>E|1NI4vP}^7PbvFK@|QpN3qz40#0%>3?5S(K z@01z7X)&O|)&{lRUv~EAJs4!x^@^%y6PSL`4k3xWz$Cu)?{cZ4k)h%w`%zl|5dWyZ z;#jOng#-m%7b!d`-IxT2RtRlO(Eb9uJ1Us}$BLyHk3pYdZ+4}Xnb%k3`j>$}f=&w5 z5PQx^+NN<1=Upv^h5EX0ds6v5<=Ysjy~^-eXY(;=hiZvTQm8|}ALz9vyj9RFOGOQC zy(MCjdgWkNpc(?17~6-Raq(4B$lE;K9wIfP9j(6* zi+!tC`Rpk{TCG&b<2IJ66Og9XoQaV2K0>Tj-K8)>RH(d0A<*#9MUQ@2p&;wR=S!~g zVL+#sLu1*i?eWpA?c06$os;V;c||E0*e)KEg|v9YsUA)~TXLja?Ue(5DRTWo%TijL ztMDX7>ADqreD4-B&U?3?fui#$JMVwyhKa~C>nC?Q{fcbQ<{{j<`b8Y^(_8#R6_T>N zg0o){XRnh~UNmRFZW_%a7oHdJCcN|`bLMXa92g0MgNN{Wr$dVXWSBb1&$xwqYbzb1 zlJU|;Da95c(`p6ATCddlDq97le( zWXo{Z4@6nf`5M$KpH_bI2|nCkQQ#qzneLvd{MP!j3RZUmR?p{g9no9QAL0TJrWwN2 zVz29~y4JSsW8P}wAt@itb5u3t^gX+AI21_WkB2qzIEHQH^~Al|`Au_Uy`Z4la`?lZ z^9KQ^NLYO_vJIcKJcu^X%&CppO+KX&Gn1&gQWS@*_M<>y0Jh`u>hHUyiY4HE%mb*# z$SL-%USEaXS-s&_$_+>BUD(r)Z?8*}DzEB!YD+qhGwqH&eYt4g#-MV%>1$e94>)v` zAO6;c1yJ;FDQY(T>w&wW45wQRs)P=hT6;ZcjDq*aye=moTO;EkX?49CtGx!+7~Pu! zOX9BsKLTRVr;z`{ixrjEq4G&2IWi%x^}ELMzx0L8`8_tvSPsq($w@vk9xP4l5srLt zetu0_X(9gjVkG0U!o9!Kp>yyqyG5 zwD|zLb{>stp&L!2X6zi?xp*JFR&}yk(jP`YB&2@Z8)qpzV9|bVQ1Z2&smRb{(YJ3M zhJhn{BJwH5K==-t0dO!Zd#ImDbT>Nhp4$GZU zY@^nD0&hvNvlK-g`kRF7d}pHXeaMDGr6F)&P{a)kyw{!madWUeXU{(JF(ZOPjVMEl zas1O4)^0IqAN-VVk!|$Fy%zI(1*7wUuBQ572=fhR{aa<0iwUaC z8k=T1wPu@XHyM;VCtNcZYO#ouk6C!K3h66~cpn$+)TETN?F=2Vy8@Aq&%lf3zzfLzQgTAIJ338JsufBZHBHbma$~?Qalx6vZzE?q_iPq*7?%aF1rP>4oS>LJzl?Qr zwIh>idM8q<_W;|onKc%P4{MXkFdY_Pte(`WdqrcD*}90vsHzK_GkM#r3mxXxzS7g&h!FNfF$K41^I}F@?EZv4HUWNk>@f4EpvH$?-S&fl!sGSyFB)BD! 
zOS0>EcXY|#uz(O(ovp_!g!PBH)mO+_a7oRs00?I^Cx+;xOWZnolk5rL|Yb4a2a zj6Z`(T2OZ;5;EP&l1K9;gXTY0`gh3>2DpR?V|iI6lkD9pU+R22X4~a-ieK6&tbZz& zJ*wA_*Z*zl3{ps9b9hX5BKw{_G}ztKH`~7~WD~RN5B7UJEI#nh7YWh;FoK)US9jSg zXXH&7Y}giM2ZsmSA9_^gV!X;2HjW^XID5!5-8$BlGt73r_vh}WnVUrp9sU;0LPMkb z{EK?r>jWAbo)#(U@6IVK6F6IR&!%qQGO5?7<3oO@>rPXFBE{eL5De~@H1tSvFJxT( z3Es}0m+KAXYf@E)QNSUBwsTrcMPGSt_bLPExg=6Z5?{^}4u$_n&U@Wvi}7rKNq%~Tq7s9cpHVh=XP!XI3} zuSzq;%H&;>q{F$_j_+8paomL>kf*q{iiFGpsh>^ZeR050+cSKp;Hot5H=t>dkC3bl z_o_mZWhKr7gH$;5=Z}Y7vvzh{z+a4_lB+H*RtSxWA*0;5i-`N&7n}GnANu0{_VQBN zPpFOEW6Jcg2NN^HSY^=VXtI}Sr+EluWf)ZDEKV1Rfe`dbPGOa5S-E=9Wz*|0{ zP$P5D)jnHi78apJH2hlAYx2>szl$uPK#aM^W>usw}asJTEx1CvbXbU*w}_Qr|Zw;0M_ z4LbtDKW-wk-Qi{vlnbT_V`OmJW$Kp++XidtMnBsp*hLB;jYoj~ks^5g8#}NYR^2GN zAzK?hScH1d=;7pMuM+>{!xs!S3OPZ`idImH%XU=)Zhb7R^H{v#= z;se!C*C8flvj3L%@8{Y`mk>&_R&qR3ke-IPWfT24z8BP;8E9c*j{mf?%#D=^dJ*eUkRGOu>B7}^A4hb$W^^jeu3184TQK@<1R{{cza52hvyXDYqKfL~5_#Mdu zY^dMbHF_4icS|)G6(c{i$)rFdh*rPjRNpq*^p*!#Y*9ixPMcNK@q?lLGB*I=GP>004g;xLC-{4TQe zI}BdRh=&|XZg!=6pnAfncAa78F91xPqDf|r>D9g*{e(_F%&@PkGW--D*s~cAdj$ZJ zlr?rD874X23O}CAJIO=*C1NzVoAX9NfX-#^L8{iR9|1-|vjReGEh(}s{}TjQYZ4^neh}TX(%AcJivH`gzupFJe_<$(Nazrix9? 
z0q(>o&duBRXl)8O*7Vc)Hy#h@qFN+= z*$kRNlX!aG8wt^FhGDINb7sY;P_oo4#wfRrnx=imyab^fJeIwoHFq)IiP1huTA;&5 zEE)lqA$?R{1#?#tkMqZ-^ne{#Ei~d+&m~B9Fh2%RQ|tJ967+L3lS6J1GPT?k*7ZMc z`S}m#0a)mOrb>dQpjz|um8`??mHMBE0C$t*@*Zb} zd$^%Q23f0_3XaqTki5*Eqz&xp4Z9iQJ8A-Jmd>&TeLOQ`4sYPx#gAIQehEeJ8JD4S z^BYehC>g7~i`C&ff}TVFZMqt4Ax!N!sB|1qN)b=Qg4f#=o4&PBV(}@Jv&iB}g^|!; zMju`kL$^|Kfi!@-j@j0vaqM6;zcBg7&G9!pXA{yl_q#8;?rbZ{v;xR#c3_1_53c4M zdn=CGx_K4f9{-@TNF0u5?6>3g(4Q^v)g8_;bl7q|K8Tha=07zbm2gEG_nSaF{jS<} zMF_&Lh@^>)cbFXsA{!12OBsSPVqfzLoNH8soi?QWLSL&(J0LW%)(BnePDnJgqm ziu(8?n1g;>t%B0Nde^G)3H}F-yx0X;=AohvxuZ-5S9-|=0*a4!*LVeu1HvT}%Q;j=zRzH)iFhhiOb3h=byX0t z22QsOe8j}Q<-93g-W`+>Uj{XQR zvATfE2z$l(vD$s;h#bsdpff{kM?|7Llq$5x zf}T_*W{SdqC7*K};eCtw6YnE5%}qV#b85kZWx~Uliwr!9$@K$h=C0IYC^tOvFlocs-#uzE|cFe`Lxz(7z$tNWKD#X3x%d&R$v z4GU17x0hD4$w-ACtg= zal{)G9JeNtyXmMQ{>Ot!m++HTQ6L|@7RQIW!zU4SW&(ax=^iM+GD}`o$K>Qo&QdE? z3wu|lnWNwhTJ(I9Ne`EzB=XK20OJ_nS(N+h2gM>hv29=;=~`EdgW%P`_dUItkH9mk zkR^LZN{J+aqCI0aEG9hT9QK8-TTEEP6|zTLgv|!v?u`fbdckhZ^%b(ZdZa{56$eB! z>I%e6phR5nk^+MCG;R?bK9@hq(I%rp04G=Q)@HsqO1sgCM!iB;D|oK!Y4QA25tyY1 zX(+g}-i!PBLLc+PJN7yRiU2MXOVAvmcM+Ph`xkg4YS(&A=`TjTlm`)z0o7sg9j4Bc zTPdKL>7Cwaq}gAcF@|ZFYnUrK0!!%#(D=#2@P40DN5zEkoF#(3fb0|k^q<^6Db(A! 
zBcTDM%*xD2JOkqDlVPvGc$NF=YiRlOb>~?a%W)xiNiGqxUkV_2U=?cOj9avCplsPO zV3`opHe8pSlWgi%b$OB&nCi;8Bmwx}Zx7QjHh!-ulNHC?z)2JhOSIp!sazT}pkpJrvCA0m1Hkkzk~T#&ghBiw$?{a(_XR`5NBQ=P5!0Sb7^9 zmvx`0VnS`f6kaUd_th$LfgQkXg0Rs(5JxouT7+!h9<~(``&@NMJ##ROArmiF$ly@} zQC*n?+Jx&)Q``>=-x>6pTzqjcG%02o8a6CBLd8(E`O^dH-T)6Mdx*E+Y--}K7-?c{$5upfoqc-JnTx;zfutv;#rf}z5Yhq zlhJbb$NC$~kC17Vb08HR@At(kIRDNtLTH%0ouVZ9(Ad`S*^|sZBN6!Vv>BgB9>4;G zKP>=`4^IZgY+2skf+^$wyaGcJ4B3*Zp;#L% zz~n1sr58j@%0HzUOLLzz*>-N!uhRdy0S}a1?D&|WFAyMKXdplhKmMJ&(AkqJ-QJou}A&#OwlKd3mfBtt5{P%?f3V|z9Y5C(o zFRQW=S%>8R^YtfsqyPT6od`VB`Ss4K2%-EZEs!6*;RUR`|NW(>t6Ws@MF|~9Ls2VO zWkrLJ^8fu${y*P_dHS}rq&{!}T{reS+3^i%=Ykn(;FZ5QH(l}OshxswM<#{P>Pf?jT8S6rol}7Gkmsr9c2nb0kx%&m7rgGzV^j@ zqDu}(ZT4%(zbh)G0+c~Q{*mJ^Mj;?pjN333Q2&mz9pUOpR|(|&!j2FUds=ES!wtDE zAwRs2UC$U~n0~cJjYlEp%`eUv`6Wg${W^|~;1!H0Jat2at}*pOgd_naV8osg7V!6D zpp{tSbiholvYp2N?Fs2dgKp zQ>dGQh6r|m@c9EO#U*C@{$GRrNeUMufjBKK3F8hs<*;@(VwdN8I<3rTvv-;iIz2}#QD;MbNl%QdXI&>f#l3k#4ALSg-7>#Ph;q^Cj=>E~qz@w4+_~y17m@gNie$W4-6GGvh zjx*GjZs!P!r&&@orvFD@KP>?Tc-%;oIKr)&m!n1t%i0AG z3Wswb$pqi4lGz|hnt!JHYs30ve}9eaNq2AVHm;PYke9=W`1>(fuqurP`TuA^mJqPI z#uc&+!lY#FZT=X~*MI+4KSmdWXcDvXug%Vv&y90Q(4p!`+L7yRcf$H-0&jUvUN#lL z99Y$zi8)OM_Q^V(#p!$)Snpxr$0R%lYfO0mJ5d`{@BF<6vvxYeo{9Zu$C9Kxok6Ao z&+4zc+ny7_{P&Kl=1+TObNN?e4;fhNh#vjOk+;WCwoVs1T(z66Xz;b$&7W?0cy1X= zK7bRhBpNc};NC1|(e_5Y9LN=@kJVxXa`0^>wiH-SaRxqcu!&W=6iHuaY0nxvfp74~^6XHWoc z(Z+G72?-9+Tu&Efxg<|g1!hS7-d_8E?iL1)dNr))qdkpqV7Js4($$mwcbbNQKw52t zeo;%11UxNqz^cPEDJfW6(vBBXVGWgejkhADe3t~-)?c4J6M7~sCamWCS8Dk5WA8B# z5T6k-{Vy2J4W6OxAkgM%!LaqIs&>Jgtzenj!; zN(&f~r#+kVtEYrK1PHSPnoixuu=oCVqA*?W`bzQ;8~RNky||tn4mNAYSlreoEEYgp z&3=io!UiV6-;c#QH2|k=z+M z%`v##j!5o-%_`wpZ856A?YJ3{^4Z+4C8%NXM>)!*JdA?pTrs{5a+SK=lei4GjA##{fr10-eVD$A^1~Q_wR` z9z^_+kitPni@MU(dgSRmL@i_lwW*n&dBk0>C)0WvNq~Y5lm*QYiRHHR6i4!q| z=Tr2#Phm{HZc${X+AO;!WnxSo(F^YJdhf-}H6VT;O`~zGf{!#|js$y!nOa;x-Wbv$ zRf}M9dva|M@n^zfNUE-&iF`XZb^*%P_=03sY!p2u5zod{P(keumYubq6eMXn! 
zPrk?d^KyvLC{dMte)PJdxWDepil=d;Y^1V4lX3PeMM&5lu_h!(LId1Ry^;8MPIOs5 zH3;gZ)=(_A~ zfuTX+9V?&q_7z3U&G{acw^?ot98>cVl+Endj$-dD0q^%evxO_{5HFjaJk)*!H&Q_V zG)M=HS!K5(kjm%I+5+z4Qlw7MZvqX6lC53);rb1V%g%T(h$0kJOdx8}#`42Kne;^! zpb_%Ce2^q^?*&Xs(Sb{=Oiwr5mxJ>ONQ)vlrT zKK;SjTX5KXiq-qG3zW$hfdx3hv?I)-ILHK35o} zcKS{1hQo4VRy(BqdaXOW(44K&(&<;-uLEB?vf&56%_?KfW-%KgzLPkx^1T;z z)QQXmsft&t35V~JPTG#7ehN`0!9I5HIzD?`soisWzpg*eXH!CQq4)M&{FCk+ipsGn z>d*v3iAwUw9BkA9msDCIqw4*aqJewO=d@aFd}f$1-G&60wWp&mi*7vnZuj0FN~Q(w zNRar+Oxw9@^^}FtIMy*}X1K_Ura#AYZ>C>bw2Bgs-2eRE=uX@(`-A6u_L`y_ z@=skpU;o$d5VM-pTD4(6%oWsXG&~}yZ1y21jSYvfZSN+0(cc=M%c;LJI5h4ode zGiS9KR?~j?rDElz^eetZWxU(n4w=>LZ8#2`hd2Q_QX-v3)VoUk@~!2K4&Sr z&BAvMTo*O5(9ESKSA67^o2Pw~nItF}iV5nei$MQueB>Ljr}E@dSg)HX21bSDi$OH| zY}xZrxi5;)@@#udTEFOhxez-hY{N>PbG&`atLHM&b5o$S7XQ+H@lwZ3%7VL=Fb5DDZ4_nI(hu0KLUGIIm#a`2AsKc6B8#9{7 zD82-LiGdWD85y!Qno*=}_CYfaj*#(sae{rd+`sTF$>%zlhuvC|Dk_7yM+chQVI!M! znMioLE*Q&FzJ4k?xRnq&DTh*NhsB3DhmzqP45!48<{CfPI+R+?;07X~3 zKlEAKz%XR8rmj<3_69|6@{P3aC zY3J;H7YJ%HNqI@Yh~lL4x-xRSH(dUK;@n58UixyA1CL4HI@QYG?TJb%%S>C*uO7~s zWr;;uNwVwJqw9*d1qO@;2ne%+08RkeF2Bp~rf!OCIOJ7eqZETpQRoR*5ec}aKGZVk zcraCSiQ}a6IMaeIDSWe=6L-gW zN?pzFJ~&0i#vRibsw3Iu()lq+CFuC zH#mRKz^?@L37M*KA+6qg&95y7N$kl(JUluW3*QnQoO3lrNqhI%}@J$lw6c zZ#uG$5chO9lNWoJaEG|?#MtHQuoNAa-4emT+DUtzb{Q5D^r0@<_3&}oYFW?MVJ15s zxYNlb9I!xeSDB&s^qW~?Sb&g69o|RbI&zTxAGF`v-(1*Mw4AFAw3SV$@66+v>+ zbC+`(gRo#N=u^cT4u&!eLG1)|lM5GEUiXuHBXb#CzhK)huMFK4h=jSQb?d*PjDtLg z7@$~FH}ZWtW5=2fTffB4VlJf@$I+7bff7JI-v>Uk;q3@_$TRT}Z*8gwQ2#Mt%ES6c zE^a57*z4#yA&*o4d+&kRHzmMqtq>2mU8>dVdSLTq zhLW)U2G2}>yrr$ioaPYSkq8UzOnDTK-PZN#3|)==i3(41$0@0PAwXM+b!PN+69ozx zb-4+@#-@B+Jz$81WV$gNxy}p+@D)N-*NxKzuNYb;?&QzYVx^O*sbffF{s;(OE=haT z)%KlW_Uo$OaH!pf9lpzw*HdW1^DU(f3s$Ai-aWWOFWSyX%#2K!JK4fAH^*0bV!1GV zDzO#^rFk2tNfBHaIXXVf?Ulx|^i0!|Rt9;ej9GwHY>F`Z=Ax&7rij)NPJ+-Q>pDzmL{9hE@?Sgtb!(no$NI5uroANm;UR3gg*ErZlFpUDl#=zV4(VI?%)1-- z7aEREyp2tUhU2ODp#l5BA6EQZ48Anh?ayV>wy31ow)UK6Jj`ASjz?z(Zim?M*${B7 
zGtlbN>nEd92B0II=rbSeCeTe5*)S!rpA(i?p1`Z_9C-5Yl@H;1iLHO~P}^0`!p+c8 zE?wZK{gma~76GP*a`79pHDKN0OR(&q0bO-r!D?XhwCf1r%|SA>D-w=@v>Ni z9YPK&s$1y{TAK$i4%;5l=5R6_x%@0vTD>U&z6i1Er8!4SQnIsguA!g8`b5Q3#HnHj zsRDr=H9l(B> zcw(y|dP4Z~pZydC1)AIV885kvI)nTQxsFL&fB~;x8r#nXB3mQ|J^*Ni#j~!NE%bbX zZ`^4-2ROcjP~!!2^(oX>3wCXoq3+CMdqaR7wfi2S`vkZV*R&(Wqrma=&P3nr?heB) z0e>sZ2i*!;5dJ7Rhm;s$ke43-=a4g|ynKs$_$g=LH}QlvvJ~SMq=R~AR_kHsTJ?sT zK))^RU1V|*qa9zJqS7uU;3nht{tZ~EMovkWl=NRO2W%Inotb`mT%Fge@%ZgY+d6U; z6@TteKwIW1M6b(#105$4;FAgXwO=2cQfm?f`023mqBs>E9!0>0lIdG>^7W0A;1x^l46Zp8*0-Hb!+HDbm&nc25P|!4P8rsXj5aKZ;y5L z6AyA~#)kz#8$JIlEO>`8%Gd)n_HchoxzM$qw#Nrq zP^(uIPowH#WQ2#wXoN!XQ^&TU=gKara>AZDB`oPq#}4&gl5`KM0Nc>yP2Aw=bRM>u z_Gt|=@7Q5K-oEH#k<5k>|3X4Y{m0uS4WGaCMPVUATw=n}Fv7U;wjjXqsln*gg{y9> z7xz9MtJzoXFfHHc^AFW8>Gl_Vb>BF}WDW?I~0%_OhK z9hKd=dqUI6bjK#owSa?jJ(BZxfqtLE#^9?#2$wRTpz?UUzib)b#+)cp{@0uqhQy1rL3iujF3D*#QZK;I#7q_=VR#KvO3F}e#JE~J4udhP3C zV5^+CjkW)526hxECX{JZ$!@#e8HJA;yHm*g@rYXJl84g3_hIEV!64XJ!bT@YZY&)+ z!1ltyp;WAiN;<6DFEWERFZs@%F|(5IR<3|BYMkTcZGFipq4qTqDmIn+YA#it{p-TB zsfdCsh9cerxxCJIqhFUr<8$>p$b#Shu^zpjOr2(IR-rzLB;u0E@My6yDV@0Dc=2g# zc!y1`E?siJR#umjR%43XuZT+E?ws_4YGp!rDzhuuhVPXx?VtE9$dE>fWjUqOjo2M0 zPBKqs>w9y}{BA?$BXv8;&;Z~?=EEYd8I%EZm840ZhfDemEmyi5z0}TUevr-1DffUm zQHbE#pREea{%C)^Cazs&9Q;8Av}Qyh69F`BmVg;sUksZ5<47_~7jsB3pW#~!wB1mp zkEJ?w74~Yc=H0%MAh1PiQa~#!ZyvWpqra5lRw6CcYx*wjj4=01l>VTjCZ7UnH5HV+ zE>agL=SiF`I*yiL<;bxD7eF+48+bZ{IB#~ZkGjzD%{bow^gEDwmi8S~&q=s5`y;A) zulPQ;2uoapI7g0)^O1_^#t+fJ&@FdNB9ljJ{_i=%!$z9f4lEqF(S9VNxOdmmzqvQ!UR4XaT-* zgEoCQXd-(%&|Al=)sVa$patZ9a~6RElc)uB296Pn&zZ1^^aXA}3to_E%X%+#NKoIaAt z-voAqTa*E&!~82&Q<7m0Cy`tZKy>6I>jpUA95!S=%zn*Dg$AT(7_^h*l2P&UoKbIN z6WNDthx&ovsQTqyW|-g_OQo={5Cf{Cd_)K!V0x8gXzRy~y@{Qyv5n>^GQ4-zkiiFW z*tmn;%=@BNLZ(VxC`PW z1=VIlPB#o)zH&*S36~!ZkQYu~5Jc%Bk)xFzczU6g8*-)bM zpj(tec*R5#A-xt4=S5qwUBJE^(++W7=y}2V?q?S8kY4GulsAUGiJOWpSHdtp2Eday zYHcq2+P4oyE+nNn5BP8k$2zcWamiACFj6E2Fw{81cspx+qXxd#aXEc@TTZR(8%0A0 
z88Q!vTh^1TH%^ubR_bx5eaprB@>$-GF-fMq`LU3ZQs7`(uivjH4BgiCBax&6H3XZC;{xs?$5I+l+p#0pZQT zu&Ncz1gpM6+8@GGIpt(40tiZ0xE1k5O!W&!F8&-0);+re@TutmmaJAu6^Ual2OniJ z++^+CZ(Fwrt4M+a-n>7tIW_x+t8M%_gs|z%ZOx-(nTtgzh?e>L3iQb-y6m5+CEnV& zsHChymw^Dwm4Q@PI@77njq%d1QoUsxFv> z-HcE7GKnmKD#mXR}CwDqjNrwbyNn)NiEpLV&SbcZtQ@E~Z z;FU|aADPfSRB*lCVZl~fE+hs(3dajXkYObSj*^?1&C{2LwVkboPVHKvs|WIkd_;N$ zxThJRs}?^W%1deKuUJYjEncti4jpYt$h;yiJ)@M3zwDu!FwNmuI_9$~UH6%}jGK4f z+g9hk)?#Z-81m?TNMvz3>l$zTaWL|rYewYzF+T$T-k_*a*cCld6v8vCHOd}G6KHzA zAx2$4#Ms0I?P}5+fywc;-na)ztIjrqpE=bszsVH!{-Eh_4bGJ13Qbt3Iv(;il2}ln zzz@Yn^o)%EH42_e&D%IYs=cLF<~va3u#CoNhM4wJ%Pi1T}b_nSh{878IVWrTPM__d>uM>t>KC z`x9#IR`EvGFi1ei2{lK#I>?i@TLEJu1u;N|&++4dDWdU? zE0oSVU5aKVq3ps;`&H!UT1$=2pGBy*Da~17^)PZtQ}Z%yn~x)LP%;Sn@$Cu;U0B4uhBwv}2u&aLeznl3cfzw`z;Rm7{;Yi&#VMX%0b3qvcnXqv zvVgHK?geIZ-8v!d94}>1NIjm=;unEJVKWRWy63FZY{zu*vIz=;_h(!=l!Jvgvq-rb zUMR+ti&><(j@U?R_XN2LUZ#0eZ9xI`qreoUImly7uLI$ii+fGGz@%oK6{9a8^VqBY z4HR?-SwJDJ3T$6^(jFLAL_b4}7K%bpqE04{F6&KbA3RLzcTD12{UiJ07kcop6zo0a zB4g-slBLISW1s191o59W2DCZZD16P67`y8Awy(L|-e-#JRPiiD-jl%r7+6H(dzzN@ zeIwN#4-W2yc5?BOIfksA!7G_|#A1*wg%r}tmE(vdcd`2>L*0SHTfsV8Y`Fz>qQDcP z;6RK|{B~hu??&{jXPRd0_cuai6=E1?9EPp z-?3}6jxKMzXH?&tIpcd_4^$49Ck^Xa-0z>;OkviYyG-W!bL)21naI;23=MKXrKEy? 
zlpm1q#bZJlx^|hip9`3}-*kGWj}VL<;>_(`bsuN`*z=8}@>tvyTT!Iae)j@b8xT1O zC5PD834XUu$k48be^l8gbh;M0HkBEG54X8k(K#S=Z<|kWu~ewLJxVsJ-qSCiAWAB2 z|2AFIl@|hW32#0+_^gum?(Qn*+qd|XtWBm^eX7fkQMRs9ybb5SW+kmhqEJ}pSo^i9 z5q`xAe2nV1e)&f-AIqBrEpQD(N4O?>pvKpH#hC^7!YEq>5}%(ms*%P_5{4jO63d7q z7EfCD-Vp<(yDQkpViBuy3)qoMzs5b4L zvnoPS0AQ7Va6t0gm!rlB7+Q=8p%MwF)x!x&Kjn)|LnFp) zU__!~?y$Niz2ea7@q*xh z<=R_vo;SJVX^6xkrjC(C*hr~c6Embo!K>&29AdicE9z&}9z zj&=0;S#W?)j9ClGQI{m!#*|JU7xOscqtAm;NyI+hE&1&nM3<&Et;sk0ms+VA<>FNk zeGQodDTkn&BKJeDV{J>=+~@$PmS@p zKvp%&T~@&R!lp@c%7dfe)g4rzlL$LV>JU0NHbb+gy~AeDIp}h|5&VVN9wrxq>Y_IHT_TOa+nrzk*b;RQ=Hg}i?tUiUMmU|4uCqpB z-Lrl;z|B;BQ}`?~&2Q>DcUXg_Qn+}baBZ4WJkO)$%QmMVD6J6o5Zg~#_E2p*_xjZo z8>N~Can)V*`vUrm7NNx9bSA-39opK|>9|IQ*RtQdb!5sj;zm*8r6?Q6ESGLlci!H} zI;b?aQB)&-X`AJ*dicbD5euJDJUi>sd!Hl~IQ`?#(v`(f@5)AGQWqeIqdk& zdX$>P?cZ)^pqut{1ZFJlqwy-)62A5%;^M6y)?M0?NnzQiD@wG`_f**BJi8$1=x8vE zu2sSaoW(FO`#)U01yEIA^gk*ht#An?q>*lrZlt@rQ$UdJ&MQcFcc(~qDJe*INOyO> z&G+|@_ukB9hG7Qo;hekoUh7jUg*f!nIYeeB#iDD%6snLtCd;w-0q0&W;dGSZVFAbW z>v%!=@PHz!Ep&B7*vx>h0#9t_W2*z3_2;MIZ!-^D{ zI4~t?Q-NIOQ`kCs zi6a-dpQkJY`WplYDYC7E6z?(VW@Ip3eqtv+Gwu5do3wUp$D1S7u#jFO@?`Q)Yl5Vc#N!q;4X%A2RN; zs+ofk$p4v=pS0r)

snu}X%_mnf#s8by4n(dZzms zG_lTc262WWor}t*+`CVIeOq z2OTR{A_wXIxfP1rF|vTtCfo2;vUf81-!04*E6@;qE?mg*QBDYU9cU|eZy~Rx@MC|&pP|mO>VELospo$?%_L`IW>K$Q-Z%;V%r@D&haPp zOV}iyHu!@4QL8)5PY(iKoQ}OAGZ{TUVl^AD&=%+Fz<6zAF2j*eqrq+!%~or@|Ec|F zuokRMxfJ{Yj-rBMDE*oB8OI?$t-}&yau9W_M8PKUiD~5$`Fy-A!;ASkx6-m2k%{-y zW%UIV83ZmPU0$_=;e2`h{7(@uw*8_`Rs2JB?3Gs7KYv-57=H8M)}Lo9cjYgsiwO8< z7vvp-kMlqtEvxacQCR66cQV~;Cb@Ih_2+ac7qg72msv4ncK(u&d+@Q)CCVp$c6ec< zNzO;IN7zTwB&Y7LKGwGxma@kj_p06Ae3ru$@m2oMGqblz*%cRt%SU@016M@e2cCWY z#&c=5P{)RC5hvFH)1*NEdL2Fyepi>!@k|e~B}$K@_DHZ7IM~&*~LO+Jtm*>O+g`34bm$t;oHf}YCT*1D{^9I{s9INxQ=`kj{gqy^4r%fPPUzNL^*YaU7jY>mF~J9~C|l2FP$H_}9Com8(60oWtK&f<4IR{7&6jL>W=A8R80L zQ@KTfbB69~%SrIh!*`9ySEeHKW0_|AGf1JB2 z%EwAa)#Qf5Y`cBc+1oDqMk43a_H`E+XG4gjgt_5g?gH*?JMb zBzaW}Z3st*;=PDX|sWoWVLAZ(zRWZR>V>UOi0d#i}QTK999oA$69H_p!h!!x=K zpkS}i>4ro4?03^o0A^ORyg~eW0<*%jBiFy3U_{Vhj^nYeEuytb`-SCnxMKfk%r4Y&VnZaC@gwz_{EDasOVAdziQ z@Q&KE(ep0rqKmq_jLOjG%UsM~H)zvSm&GIc`0>l<{}qFyBICU+s_r4Q-RAuZZ`!Lh zvr*BxO!qNs zudZYmJ)Dfq#5PEQ&E)^Ppj8SO^FK&8m|u73c@EmGx-L!LcadV@ z-}SJvSk5TRaYXQ53fOmB|3DlaMok==)*p^49d7QLb#Oj99Lv==vc)MBj2U9>?(IuL z&&9kvY}&H1A2Gh_M}6xeF_U-@T&(hVy{XKk&19O=!4+9Pkwmw4Cj3`s+c1mwCjteuA|J@($B=%X3#lUV zqlb6nzZTKO@eJUEZ2A6$T}7yryn?w#gJ(l;{7X+#LVOR8WQu+vPp(V_-5H6I)eb36 zT6WtaxKo&b^@9$JPktkZPh&nVA#Ln%A(2z0j#pq(cLw9{whsK9oaDR&b*+`9`MTc1 zx1%3G{L%9qozvSAN0!T#TIl)rT&b&nO~JG=`z;pHR7a z^nS6zT)D;*bK`M47HNfR3-9NPcd!Nd+pNA*@!c^!!4K#|IZ}WOZ7YT_|KiONOBrF* zMf~RQ4`-2x3hVpB56WZ($cX!O4hKN7e`_hsp=cf?#bAe2tzpJ_bNbyIDf!y|FqK@PdC=h$w1#@yOh-ByqNtAg~@k zpBW7|^)b?&d+kxWd?(h3NOR}zoHxjyJKSLF>>$AA<%E|UmyYvML*~S1wduz*ltn6ot%M}Qf<}|7QTtztjqjYdQ%@a z#C7bYjB;z}3yzQOx8S8WQu@ih4Y}W;01UMUEkc1jkXT&Du^Q7*iAPq4y(Hkmu#Qc{ zAA(X6xHaiRw`@^$Cs${C{&nvw`(V2^-Pgq@ea(84cp8d7WkOf6bm(3ziqDAX3o6lI zP?+HTPhny@yuLFPmuZa2t@1<5)M7a|c4D?M`^)@o2weu(h#QA`5RdM>tQJAC_xH5N z9O$ov^F$mJR&gKZZ|hI>b`{HtC+}~^x0a$SDZKV?72}qSp;2gindAGGY$g4%jNu6-lO^2qN~X70&3Xl1_sPoZK1Zz|CaqLlrda|~`j#js?Y>p~3Xv4aH-Wvj6i9$f3L$2Vf$%oviZp#Dh_KY@_hvi|4CAc>^kSjw2G 
z{aCoIK)`hA0NGdqa-@dO!4Lsz#X3#2DfG9f!jvDBsfxk|4qi?P+={=nd_Nc9g3rf| zCz1eYoK_lA5FZ?gDKG3eRia93F2I6?T!4)%Y+x>T@VYZ2Rlv{#7!2@#+xUZ68{$_b zW+E*nit&c#Ek`>@FBRA>E|`1q$&Tgu8q+cF&~Y(|T$8B5KK&?XNm%Ii3zE)zb z(G(wajMgRNnWdN;?C}8sk=l-~fc7`2Gxos%AG~R*T_|4Y_O~4+l^}mtMzK5@Rz<4M82`8d_}B)jfTXXh zznt1eGCb7zanFJmcVtMB&1kWDz7_DFC3xO`xhEP%+jH>$IOkwa-TcW-v(>iIllS(5 z^IuT4)YU(A>GJfw(#bq&WVqNIq=g92J7UkZ^1IB$Odk!SpPcvpm?lTDXigiaYrV^T zwDsSeFNF|itTpHc0vp?VF{dRjexI-+gS}?Of#s&|=Ba{!LAvd6l;5Y6Y)-0huZi2* zZqW5DGD8Nl{H71PbO}cZeY@s38sq(`hbFVRqRu4c@{)K;oh<()`ef%V)4cnDaqN0X zWLsTskdf9u<->-*s!X9@9Th(GqtL_aZ^g@9LNuKUFZ!})2guFKkE5rg7ae{9O>a#xKuy;l0L?4 zAc$3rwN9-cctsmp=f3re9WmO4$5Nnpsl#1dw^`_&Z04RPZ@@!CecDAwzS2)kqbS(@ zY308`{)~J)dW5YmNlPY;Rz0m_m%ep&^D*T^nLP5hX`}PAUbl_r5}C9Nj#I5BdTxd} z_tm@D#fpW&bec7wSJ{HqY_*S97A8$Ar zf4_%95x?04B)Bi$W|8#3k%@h`b>8dxXr3G2=}SUNNrF5)Do#Xz4JXMM;>q2agexj9 zKq$;5($OhMKj$yOGW$Vy$bZ`<$&y>MtK*qNIozKD*#-ccR@qh_DGq=TppRD+$I)a52+Ft(($~-YvbS*Y5+qQ@5EfKQi5ou5FOKa65czrtU@fv3I0aUr zWG1KU*NpoPqq{mNaHAA0(t>@jNH_~lMIEW2i%hj8q5CCsHptnI*~Cjk6LD1~Em|of zFi?wyKxL{*fn4xoQum8P0uK_=nr@YmPa^L{&RL(=ILcMkGW{HG9a-||#<_(E^8r06 z2CZCw;PLGZ=fyFj(oeJ1LCyj=XUy{5M+GDFy-1#5Vm4*4T+w|?P>$?u#wFLhda+&_ zg=)#DhONJgAH#Yqb}%Tu^PRIvaM`n}-l>S4{8sE$sw(|D9W@%vS?Bxbu6p#~aZsB` z(5SF{lo0advNQH8LT9PUuse_xl<#dTkA;*EDWmmdN9?+_nZm&JzjKAq^0ampnbC4j z80xR^*E)35MA3e+BEUr0Hl6wm;4L4bXkTI>Z-#!@vm-mcX}H>0E7YrXF(t+NSV+jf zGj*0f>wQ85X-pSi4wgWW*LTb;g(xtF+*gR##UD}l?0n(>I`pDgY2;xItuQJ+TS1`^ zeczMBQvtp6yc>sDI-UZRko>IBdk?G8aV%Y_z*AG2_q_Bw|C5|Kr2SKSZ2q4U1g;I0 zU|}Iq(z>DgKl-}@O7FZhPwnmx)60rO7$g6@Lbp<-GneCA_}X<~@$kXfv41(U&FhTS z?W!}0_!VqzQcRspjKb8cnRl^V0tq)vDSxCX)#r` z_q@(i3Nt@Yih+6^Q_aUNjDP-aR;b$e;5v)0q_?1oDSZXMcT^v%btdK!!`N_hn~?%N zy6P6i9aI@t>rY~>n~o)_AzteedsmWa9W(Bk$q$E)ZhK2xY4($4Xu9sa-sSg$$Td+k zaH>?-)0sZ~P=w&YYjgeYR*&Uq%M1soTue6FCGqH20yIw^+?R*^mw&eMZCYo_Y>Ojg z>%X2A{9##r{C7GK7Mqq6;=-!~zvK}6Q*UmFo0ju;F@X`xT!cVvm<~JBrD1BKyF)** zxp1cpPnNiQ#|i~JS8P5Y-sWL_EZg;A`Sj-U=5iRp=_?zZ?EQtp&5N5ouGjY?e4n1% 
zMtod|v4P|9O6Ty$Z|?tOUKuAvVEcb^AXcwLxcNE4uWi$Oz-}>?_pOmO*U5`mD-yEp zd<6J$7am?>GhVB-t(I0md1V3C3w?TsRy5D%a~O2AHXP!w`I(RhivN2L2fEZZPQfhnGE zF-IUljmmY!58<~gdHH_qjuMj4ABq0mYZb+<^Sc0=%x?;|C#%0MA4CXp6Z&)LOCFNx zE?j`0;FgBL{WC=`*(xdNN3{x_S?Y+-LlBeQeyqoxyMdEqLA->3XGwUY;%C_yhc5RaonjHbU0)@ZfpPB044j{1RpZss0;n#@w zx>RwkG;H#VliBm5g99j{W6WD-xu^6>)GP%Bz?}iyPTdBVOu(CW+~x(mERXIo{S;1O z8ZPzeDaVmhZTVofT`H4Cn2>h7k}Lr(I8BXyuzlg(ZQ&t+xo0@IielinP$-Dh22HhH z`uYF+{50a%o^#}q;MT#%bV=fSn8#@6ow>m?Kl|q;AJ;0++oS0+_*WmJ71PXQL4kA1 z#wbAhRFefLYzAmk6vXh=cwnO_RNC3IV7|4czkJwHxDz<`L;#z$<@{M}o>-!r>pbwY z8%|Wu3#x()>3HH6rjg1k+~}`|&5Z2W4SLuGyq`W|ipa4bFkjf?sD#*#TJMUrrfKUv zt;ulP^?<*#MmoMd&bbG0vRlZ-Z+b7VeC!Sai7}$lrG;eFQyyWW1#GjD#8Bggvq%XL^tjRIS^6AbB?G4h;9mC`5} z4%@DU9FZhjkHZ-!Ph8AdRnF3>ECM!Av-*SF--s&;M9AV8aMrGW`F%52L+wU#Fj<-? zc#Ji%eu38~@)t}*`6k?{GD_tXoxiH01P~K&!dM8MmMqar00W}~6rpqZn5=KY zHw7lmb(xmjX3=v8KDU@~*R7BlRdU4i5w5MYxTDOHGOY~n+Z`4lhNz|46c?X~mYPWX znoT@+wi19grzjGwjzn~8pylK+XIsDgY8R*86boSO)e+Pw7l9ts6<_&C@X8-OmF~v8 z0`ms!0L&oxvXNL2>}^!lAOU0Zf3e~}@5a;*rSMLWgVY`xR5Dtxy$kQTB6Z2wT%-h4 zqaA#sh2koIv3f_nU1p4~l~$A7&{EA`YJV@emx~(3O^-LwBHtD-`N@b#PUmuNv3l{+ z^t%>@IyIJz3C09{&}FBij!dS2%}S$Ev6w2Z%f_eSZ0I|4Ij%Bih6jpwC-_DS+|3_r zTsnKTiQ;cRr5B;DY|d}S61gjywXsib>iv%ZDUs@|fx%p+`qf0F2}721dCwf8&^Z}p zyeDEP%U!x%ApXLh#`J296Vkgg_p{sY)N$pbx00X4v`5NfI+sf>ETt-$AHoKvD$@Vh z8vj_p^d(akP~6UO|Elmn`v(B${w{hV4XQg(@4`O4-g@=Y3TY7<(H^BdKJNP1CmrV$ zE3Vq;#~g{C|J%|+LHRqyphuqT(R)tN`(~d6s1wM31U!B5gWNnk$;Lin>u=$^-Bt0JJ_VrJPD#h^EFZZiU!Is>W^X^C9DPqb7|4r2-Eq#) zVWlckfG8i!rE8t8jqKAEw3UPA=_l8FyEtXZnC}c3Qj-NALgq>N+4>?_%uHzGB3}OQ zEC5mmL7&Z`WJv^@#bL#|FSB{k5ekF>Ka-kXD+WTcN9i#z;kD=MBU^{Iy}+WGn1}5| zET{;~BGdfX4Mdu{&NGGx4^=g8x}cssUO??RkA-67D^(YVfKzZ3&1+@SBzJczaV2z{ zoJJ@r8@?y@ztyb!G4)Fm+hm=};MfeGs@H-zsNw`(>WM0C@*(dIdt2S||jCyHa~`+{fQTzQGid^)Fp2=Ft{^il)T8ZW~NdA={9 zCuw5Z$)F?tL!}NT*G2;k_D z3ueF@j3rFfr6q%hH3}BxyP-RZx;tD&$VH`A?Le|U?w$nnd-&bUo-FsSULKJ7I?9nh zBrXJW#panv-kCf7^iqK7vt-Gf+%wL21>gXCQC|QXM4Ry0yc*iU@QB!DoWz@Oq_zB- 
z3Zg<~@bBY(KCLF&#M;NVJhTpTaRz)RJjeo6^_aDW+4wf-g<%$av5=b3Qo6`x9_qas zt53ToBTYn^bU%=&5Kh7@c+TIKCi2HolTplF`WKz~+kH}+I}C`f>^nbE-e>tAJ&bOK z0-Oj@?>I8uK@; z6ZtBRl~&WkW(~~9SAnma@hGumHYiKlik9B1JW2foM`oFJotW6gchN)sysMMyBa4ti zk>gdT$XTL^tiR7Sym7@rN;U5TIXW7Es=mz$`zU@e8mzhvqZZ*e@2LgN=b}chjy;57 zjWaJDZ9!KKXJI^F<%@DupuaRWWAxsw9xZJGoeSPfkp&q~j6o~>MY?se+5b_{U0+i( zN>5rIj(70{eSe+vKA}=!X$G&o$^Hg;XNvk=!%8(w;^0YPIz+#(d+zte`}LmgCQk9R z(KR}*r_-Tc*EHC283vr~3sZ6|k&l9i*C0gC5KY1OmRt9GL8ipU#m)uO+ZjxG-fZVz z^nd)g5t080a z7^FmR@IFeEoAJTXzw(1d1VSl+zrba862Uw@+=8V^;doMAKi)V}w&IZu;o(B)K!yxi zcqlszmsykvzFL{wAw^u|s8}J}-%H}Zy4U;HTlE=qbK5QzQi(}~892ol=Q#^Xp4IFW z3P3f3ng9cl2&R&hnsoptkn$O5l44GkgB!two#pv*`fVUG7_s ze=Fdw?#cWKFOC5-x-eWaR1^p7!c!DeAKoG7*g?|}i6j%J%C#v~OU)kFIFLc5&ru1~ zNH6hNV<<`UQ?3Ln(Qz>4y52mup)1iZGL&hACej8SH+nqMcWI_8R@alM310%5h_qX7 z_sFrq;mILy!;qX+m-Bt?5T)@_6o8+mm;9;2hY8%m>o~g0E(q&3>i<+ULl@?rFlP#e-TNwqzSMUCxdq-#79 zTtC_58|mK-8azOxTMW^suXUE=!})ecdmC1B$9#~t_V#czE9XD{$>)5%^mLxFeV{;} zU1K%PW-QBTw_Rvk84WwP+?tYcLjVZdR=^P}v9xyX0mN-S+CDy>Rm%IVjg#os`QuK- zK|W10o=>ciaRKhma!Am}7-$GUr9`Z=+o>>qm})$j?r@;2dZ(tbowF2A(|GTbP7TH^ z1isIV3L0^vK%eMsuD>pW`5m=p*5@m(=Ln=&$Ei1F*zLal zNE5pcZQ1K9QLX;n!$ySo(Lhn~Q|jptUY=FlyP42-I)7t=392rsJKs*r> zMM>=M$p@V^_KAA##q?C2ZcN8!Vy=o8NjYznqlyOdwRT}gi+gg5V8?%jQm% z7m-)I^@8BGFue$m8#vr`WsvlJ%%W@zcmePP} z&yL#)wU&%v2l%qsRO{gL>nEE3nMr?Ij_X8`;-!O7{750d+VtS;(D^eY63%ymWV6A{ z=Okn`@}K$uo+CX?N%7Z|jSVw@n{iegsIz+wcE-_fL&5B1)dU!U+3r2Ms{B;amD9dA zb}+YoSBS@|{noqxz5tx9X~|)PZx`!w>`wuR`<;%;h!A*mO$GrdZT-V*T&Sb-gdY1O z4Q9tM$T^y1EX3gOCGt8~mghZ;YIG7EnQ9+A4PqHI377n=# zA|iz?r^=`S#NWI~biD|i3-rM9*lK`mRD;(3{`fFRVgUrD_!G>^S9$=?hI5?ULpRQT z`+AHt{t0sh%7g_4zQb;D;csi_u>FviHxT5eZwsq<=HsI-x-DC7hhqq)8wmYosM0*5M99W(-m7r_+{&Tlk{Ns{LG#<`Bj{*d%@`^&OI};gRx1LrfGuVO`zy!Xm;W5 z(41zisTCzd7)jEZz2%U`=R7UzjQD0?KEtDVyHS{FI&|}6IXom|S#&PLJ;~~XYJRS^ z8`tLb8egZpfa1(HW2neI%lAir!WQ0bNzG%;eplNKt;xJAR@im@d28(DMxUu;#V$7Q$NbBH2gA* zbsS%{dLCcZ1t@|*%>&(PA-Dm=c1p*Gzn^;yL8(Ru`njJAd 
z*d)uCKhQ}3U>Q5#SoJ0#38zN2U$!Uz3SonoK}B(C!p`y=qU2VFM}<}`f;S1WAos)h zc)EmNQYiZPK%?wMaP_TYEl!kZ4S)PF_a0s6i}x72QwWJ9Dm^tLG*T#}OM*q42~6pU z{F9{ctrtZSHoZyn0A0R(xL%8_iy8qSC+-*2M>llj6!bFUHr{=vE25y4S9&!6c#|TO z^G$?>_DA9unI%x$)u<;OM}b-CCqlofrU+~)@Qt;m3>=p5{->Y&u%h}QNKcR#&F*JY zrt$kjbScjWr+Sm6D+Tx~5@_6c917^GUw2IOC6Uh3m+Lf+z3cdNorfX9YPqZWWe|wW zw#IWC{t+Oum#CHFIsYP8Uil2vt%p`iEscT`0f9}&87uFy|K4xET*VQIIEy*Auv{cv zbStLKG;P&`4Z=WPB|{yj*v)+{68K5Q%=2@=m1w<1Fv-iHKK1;QwO&f-ToszK0@(l) zMov&utGcj6s1@N+v8%ZF{6q-arQe*uEHWH$E&O`LOjMEyhH~x$%hhl)F*kc*Londi znxJD`yPTcp8x4HXc$7Sj3yZneeCo&U`+Xe z#2!JAu6TL*{E{@&!qMrWVGNW(t|Uf0uWEKM4k>rnuvFhE;sFNTxfZWJa0YYQm#}dP zMACQ*21^~^LAvZc?*p!`$s#yOD{RX0ZfB^onK=HF;iEpw@L{24q~v~;dXEwXuYG2z z$Jf5?(ZeYBFMFhgtC=lLd^H_gQ5-H%KHR63y|g(jGmn|Q-=o0w*4qE7--IYG@K|Z> zGmw`m&J$3a#87j^$|05f8sP;M)`SJTK&Cv=$3k{lHm15SQ^EiN=tk{er^F{EVBj9r z9QnYy`aRWTyeZsyR8CE$T21^gw%^b>nF?)Xb9~d6YqZJbPyL~x#3bHif&*defYa^@ znwU(?4x^VrVz&Txc)RL=dc{iF-YC4CKDWzyZS|+g>*ZKy!@c?$X;?y+0>|z;2f}7C z%R{WYmEq!vfxlCYZohpkD!x1n1zmA>U01&yl&&nimHxFG@AQ-FCea3mB&?wKu>!g^ z8R3NS6cGZO^Ti55f||v!mGXLn5}D+Gc7{p){|%mu<9Z6(9=r8&?YVFMY0iv2qUhyU zY1sayop&Em3`cy9oo+xU@AK`)UkC857#)$C&nQ`xTDiKiT>BG7v5rHpMsLl6I-?hr zvRKb=Nx8PxNDw@vL>ry-#$Um99X7Hb#rQW@IM3q*gZ<8%#KHD~`@z{wHrBn;uj|zr ziQFNJw(~EzPG{DMUUI>ezYYx3MdhEGov72*Zaj3p5&_2 z|8U)z_g)NAA56?l;H9*jETkx5!%4o2aq7?=g06aP!ML6 zPvrw=oHX0#z=tN=l|W3ns6_pU2a?x!%H-{Hpr}E4M+be+8&HJ0%F~JlXzJ?Rf+B9F zh9K&%e72s`JUNFJD$N`AaImLEoz)8YKubLJ!NPfdx&rLB)OqJD4mbPUv%QSLn8a+L=3y23v3|Sd2^0 zoW7VokW2!Dfr$Uy$V|c@+7l0rj!ROen}jv}F^3$Mf&eqyS3A2nyy}&Wn|QUmbfBIQ zIoIapiLzx3CjO!M)9kY87UAm4!hDsyJan!=++;lSsQ^b_;4qA_&Gwb>OdF7+G?-`U zskQgE0iNA%1PL)50wb62d@zS3q6-}=#2~#RO(|svw6mJu9?i`+AIaEdy3@Swr_AiB zMe%pzMR8(|rarW%k%-P#U}FEgD=s1=!sd{Aihbk_K_CsyO<7WlL@iEzJ#qw+98f$j z;??y=U1r^M&|)d&Bjyj^jWSjUro0_bbX-;sDG1g0w(WHd6}^&o!YAc2LN?O*1Y+Wf z=l1C?t|m-IFNhDjFM`o{jb7Yvf;=z*=yc*hLY(+Ae&PsF z=23nWIRE<|roy%GJVlBOYa%zZ5cSvnn-(Ay_=qY?gsU_d2(UF!`v)Dki8mq7^z1Oj5M|bf*-Eu2u21XiF&;Y^FwHv)p83tD<`-)Sg|K52C6Xt%eJf 
zSxgFedd6gxNde@KcH`r;B4HU?X*sj(jkx@;mnI$N5w)Z;yGB9_;uHehLTw&f4yKl~ zPeWv7J3__*zlS4ySQP7OXVn?^&M{rI&aqt>fJ9HA{+{Hy+XHbRcHF_+3(&E0j7Kr< zXVxxuw6auI?L#LPdV9JO7O550YV~;{C;l+<5hH47gA9-jr*iPeyAa)Hj$?b`IQ(vX zSY6VFw7OlcSFV?P{N~=U{8&zDF5ua?hwxF3gjV@;_KN?}*5FNHW+8_pA;DeJUYWdI zP+7eF+X3lZitwR7m$!pY>BQZK?tQaYGd|){XS&A5g2NFXlC>*wNtKt9SNHi!&DFWC zax$vDW)IS>2NUUHc@UPUf0}u&eQBwM%8Q8RO}Jj`si zDPYabnL0=09kp=3Qa;?2C{y=(VK^R`2aK3so3(-IrE(8ARh_=Xff_g(T`wr%#Z>(v zuLA-nOV!m=R}(fJ>wqsWG&PvBqG~Xd$l9JO{3oCbWRcM2sxa}7LyX9AY*Rx~VKX4j zB>qINV3KTP7y$1v~>0K<@6hf=&h8y#iV!!yJVsB4~D*3NP_^D;Im-nN4V>%P<7`H#NxD z?v#{PROed#n+-C@>k+l-|4vsry+QF?*=ZfT(d`h_%pla-6+u`<&_PQDe{G6=2I=V4 z=0tfqw^Pis7%-wP%seV%AtUJa(ZmfI?%^$R3XxZmGw0b5>p`Sw6~SmN$;{+(?4MJc zJpduq7w%48{UaRxVcgr`!s_MI-#kkR!cg(z{&I`Ez_j4@Pr6E&2E`|%;3sQ z&MiLqVruL3@`0Q++#prk(Vv`kYw)SUO@zRRaUM`|B@^Y;0#6Mhn_ld+z|Ox7Pr2lo z@ul8tQ`id`ZtXbOh&+@3dR$##;Ce5l*Uk^IsDFvRthnJZ;jIIG=99Ulpz5|jVkU)$ zqh+BQtDX{76229*fqd7smUn?wZIy^YsY?Wr(sU?e}^BMp^&(?yiZ?K1h(0 zAhE3dhfK|`;waJ&w5vhB2n$M8OZ8Z%LG{7N%%Pa@(rK=c>;6uUs5 zS97uK^GLpS7a0er7TX~>?fSY`JgR$BQBH$u{Vnq5y|Z`$r@A3}Y7=$N2Jxhy1SA9B zU3h#liez2J5c3f_O7OB0rES~&*ATIqe0KEds=9}6!=w}2!34C5!`?jZ2W!*6kjB!} z#eyG=UW}^kFyejp0b2<07fMq-{Bjrfx+y5c@b3P&xV1cHJEChczgb@Gy2-1_?~%P* z(7MlmZ&_UH@X%mwLmTuCd3W?|VJ^{TzL|B&h5qz9AhN$<^aoW?rL33ofOc1x3??bu zCD$77vsA}HVmv){AX}V};w_D8RL~jrzNlDG%7_a)f^WTx>Qgq zQuhh+dNSsNBVcS^CEdwR1BPP#r1{0r$IoJO_B@nzQmx3f;B9IkV;;5iC;YhZI;MxFVA8Ic>A} zf>@@M^FemTY*(QP2;iU#@_$5z`#1jPJ-f^;e*l&9HI7g|KrNAa0&c|#Ei>npGVNy; zBAX((HgWu-{?h+@c)#V>_imKoBCyXR>9;ER!-I^q^jk%R_#0qV*zZeBh7yrr1mfUK z*JgA;6r(Oauitvt-%}T+8KE$NIZmlncR(6g`zvHK?zfqc=XT(BhWkJwXv0KQ6wmHw zP|*#m(aNd>G(%$)AYS-DiG0x9e_L8dR!vLV$O8P?|3{A}>dtRv@GlHoAPVG@k<-7Z z-?rM#0+L}WD=|x@-c;$=U(%Qw)aMWpHtn#GQ$&QTGR%oSBD{!2_h&*>>~Ws+nKlts zvME$xg3I8Y$A`^3(uh{uY&m*tR;$HgKe}Bpty9pl9#7O}e`LV4@gz0OQf16Ng|@j5 zO`1#oF*{U7Ewzs(DnlzS_W|mJmIRdnxi)u`b$#<%PBkB; zL0kaat@O*7nc%2f6z*s9&FKKW@hU0GZ|--4LLXem%Dpgh9D1dCwzdJRusXHPXvbLv 
zB=&+B^AIJU*&3VNi_xWqS3eBOjDO%Sxd8pFsxz1E)y!|KMqy;Qdh11s-u-1}(Y>)F zDGIC>hn0iV!&35}!axA4pW(&E-=T4vQl2bj?!(A&-aMSF#yQCxLNqSK(Ebm(Scgq< zWK)!?!S$c~*jbN5y7HSAhp4Q>u{<3nrjb-k03`hKCCphSL5@oGedEve+M6V&*LmLB zypGIf5sQ_C6rj?Fxat(K@j7+Xi#Nnr9VQcXuJ8Onu52XpPX&vHBYH2x6CsN}hy&TC?0CROsP_ML*%7-V9D=SXOM zVht+mF@&5^3Pe%#o8fb^*JZ|1G9v2s(oG7GE>)4*{ty2=tHi6Js?S)(L4Cg`Q@Vme zEeY`X>%Ql(smTp6JYFyfd^r9j^RJLp(bw36sxx?FRH%Z4?(?;A<6WTy1u1D^RYGU$K5)U;A3e;OQHI?K6j=S5D1n}b7K^K< zFdGN&1J>fwvOu^K^weq~#RN??=wU(n5+>mBJ4+#<+S?T`*pWMc1M(dvhd#Ixe!4~@ z!6J1cA;(0X<0G_jQk#t3Gq#=5Kx= zqU#bTZBS$4W9~5m)~Ygm@9Tf|pyZhAZ59pxRI}}VVH7~xbd>9TMxZEAm>gn&jvjw< zf;9Y<0_tfeyGEcH`QdXy02FcsU%!*6s<}X9dtiunrh-HST>!ZPmwbbsKEiRWfOC;x z@~Y+{_n;6K8n0BUC7a|I!4~&954_^O|4k!nkBOY)ty^R{wYT~Hx;*O8&DVXQfbjPb zV#{A2eC*z=tf(^;!Q%mIMJzQ}6SSFLkdV7S^X# z*rtmyGuS63!@(Z=bRii&EfZF=BGytG!|5!a^xD^O&6~AFglZj5rq4_Uj*%)}et#zR zn-6>YDhTHK+6VF;Jt|A&^;3j9+ZY$2E81o>3sfm(nye8_BP z(qu}qELbe>^F0{mmvS!Com_ZZAJMM-$9qfK&F5O$4qZW&_VkYNRt^+!~p5c)ya7p5O9Tm1kT`IabblMLXpmx%d2*@>_ei`pAtS+vV@UbIf%im}YHIAFx-ph7Z3=?I>kJ^)zdlYC zXb8o#B__`-@&@mL&two;6aW@2`3~)X{RSuS|1%R&t#{oRX%z9W`Bj8Lkos3l%iC&Oof;;iiBHDmHpW3cBnEPOwcR+CUc{s2Ych|?~?p% zDjwMZQM^)sDX{+cxe3h-C@zfd&^?i+O+g!DZKq$dbqA83Og9GNFPo()jut)-?$7>Q zV_MN=N;Y7x$m)GNfg%w6KwQ()%(3R zuwNKA*FSPh3UikCiGB+gbmTcQf)BsNiKw+g ztMdaU>&sKw2X}91*QevH@~g>{inZF!wEoEyyBoC5+h{zLvIVVK&eoZh#{DS3uFz^v zV0&Z7W<^7I^s-Z_wv!VoECegzr*^g(p!W2*a2m1^U}drfq!5hKSSq=8y2Vw=1@kCtN!ct;Z|GzUqu;$bkmQfjjOjS5VqQ^zYgAF@!C;uM%pW+DeET@-)TX6W0J0qInHE4`v}qWf%(w7FQL-t1gNAu zd?Z~&fx*8rsIcgq(F~rR;S^#X0H>JsMvBM!J_9T6Cjq0YZ<4FY_&}cGd{L?nuozMHlnW&)9NFgJ5_Q9O$g5T&B4J9( zt`YcLV;P}bD)0uz&zC6VOIoau^ey~6`bDMRy6v*-+MAsPF7}HQ8aEKBi&C&I*25;k zB7)U=Hq0XrgrW|rJ$f0oT}YRXzwF+J*#-%Bl78fFmU)(-}9K>9q zZ1$xE3U~G;v9u5%|) zyg3p6(bYIX%Azva|4!Zf!;k;@S3k$7|NcDSF@|VZkqhO4oF-vNUI5%aw5_5v@JNa@ zSU_rPs)Ft5djDzW(>aEOb84qzy-Emz`w>J2Aqb+t^ZTp z_FQf0>-X$*I|vY5b*yNAEM&2ssD1e7Ys7+rYlM3_GX9PpTw>VNx!)cqf-S5$#xkCW z_FXW$?c4DwKUT@C_aZ(>HEu7bId7#`WH7ejNA+J?XLPVd2zPcQfB5?bsGa|AQb+vO 
z1(eF^)w0ig`oVmC<^GReLchan&|lH2>pH~`rEw`cFW8CCf^sd-;|jP=#UlmlQ^Uys z%*z3udzZC99lyuR7I+q8PM1TSWF|v@07r_x10m;6?S|d(cLpu>U~Zt+T>V$Zt#p@+ z{knOZWL2#X`3a2r3?O(gd=E?nu->Q{mkXQ!{r35(ftM{AkT<`7YeNR}^z0iX(w^E6 ze}Q1zdV=g=F~>bLbvETZH}}5Mh*8#u+8OP5%I7wWL+_Vf%A?Xg;Y#@vSgC8O`}x%W zcOn1(m)KbMF9cloS6gG#J!H=d{l8ZXdX@qcF0KtFjL-ACqNz{qra!+cE6N^vjgtob zR_kI-t&7<&&sPumN##eaN9Wi_L!mc_x6;ho%O1(mL_n$cR`Te4Nz?$34E-V9mv_-CLtrA22QxPs(W{ z^y77Y{eRec3#h20=6{%O1PLhx6ln=*>6T_`>F(~3PAL&dL0Y<`y9Jc)PU#Y)oA<8u ze}B*OoWt3}viiL{cj_~9@5}%(KIcGFMBic(Di=SP74|`Gj`SY`aZ`X%*5)tCPfjo$+D_th0pRqwd0`u{rR z{QDOFe$fZQq&M~K!N*fzhaYNwru_HGfBfsKg?Oyo5XnaQrG_C^S z)t3-z6hthG$>ulqpP>*3`h)!t4)pYj8BGL13V_2B>90@@h-n^3SET*@2Xb(r5wAV* zLY@Kzby|4edrIk#7N8&W+UkeLeg?nB7@F_c zO&1OoXSd+Zz%6qN+)!$r(m7dfF$Gl%)(*(~pe8h-&1;P60!gYAbMyYg8awEU+F^FM zsG>>05lAE_qyQ@cWtHC$Zn_!mF_jUru(n)N%j<_V7J-qLPLb6dzM^h%)o(xB zduslMKt87NC^yMyh{hdK!nBY-)O&j;?0bg9 zRQW3YR+#KS*!T8((1YF)-A#BMl*C)%jC2BzXVQOo@BRc(b>yUx8jJuZ0`P;LOk42p zRlsX4cDmU&L^RH5-zWsP{tJ`@mDf*N7YkDDg6f&AA6Xo^Tku-%p{flN9ug-vg9YgaiAlJ&`eeBRXdhxyB$R!=?TvPdgj)N@JSN#8UlbR=D(e;N4P}_>NvZrX z!QGU2naQOzUh%eW+9uO&S)@TfbSnwP=qu!Z3uXp2AW>xTv5PMPFU}JC(tX14-{uQY zxEco|ChgwY9~13+kd%ds5Cr2>h}4O5TN6-ynr06f5Y$BO#91~&zM*{Rf+j@|%to0g z*Nl)4YDkLzh}5Mt&I~egB(1M6gGG+D7xHu9AXom_w-lEP=laR4_F| ztizu`1+&tE>*@;-%t}pj4Oz{EU=6OQjN%US8sREn>1hs^c>&J)=t8et2n6#9*}&0= zZaehDnPUPpM08`+)Pz6su80jJg*MEA09EuoR4|j`QhpVs1Ht?X7)Z;KWAsv37zm~W zh6+Oes_pm_=0gGPPsQdt-ygT8dOU7@9YCP*Oxh;{%k44Xpm4bH7Sk~-6M_t2+z>in zJmnuL=e-Zsapz``7lHG83=A-Hj-JRr+O$w0m8)<;$r7zE;TqRH1A7bjpq!Jh71mCy zzE^d@%br?&`rpE_#U!>5mkKka%ezc>*$!pgt7AqXnBR;OdI}g9FeP3cig5k3)v{H- zz9{(*%@X9AaCi*H((k%sd^y2_jXw>{?@$hE3Vf^rR9USbv4x*clFrNsR4WvHxWy4G zyqu)I4F4a316A^f%(!Xlzlsb<%-F}~5riW}Js>_niRk=iRt}{A_Zq*if0Gb&>mNq- z7xryY2{1stn-$W$8swreO)m~G2(_yWxW&20gP0w6oRI$N$-}+$mcR}&;L?`>?w9cjBJKUH0Ph4-q3iIb#OFTt zYsFq-$0k(z{P-Vjj6lg9{Mu!oE>QqA;BPk`Pla-hS}TK5Ml)o_GnH6g^EK3R{9oZX zPRMkHm5146O0~oH(*I?hJrc-aelw07YAuDVC}u_ByrDV{IDTU-1JJ|9`K|CvmX7jV)q)#;(k9?OdQxv0`;?LZ5(VJcqh0&UWwd~ 
z9O`_&PUz2ON&p?lD<&S`|5;rhYZv&(Y;#HVWEsW7LGFh zqgUke&~;MfdPm<0>H$E35SX?pDsm$1#KH&%?JR`x=l8lJ}#q)-2ddZDx~@}${G*2Yti9?+mq1S z@Bb}0(@+bYWoCRD`pa68A45sp!<)qdwPG;zy%uZPHN=YN@zO{V3~N)Qc6AJj0)_ThWbJ;ex_pOX6ZSidI}z;1>Y zUF|ZWqLjjN!~S!x?@)n`8|{3yRsbwz3{%B4O*_|Q2R=r&(0peEg1+&*(isR&p6aMB zni&7bjqH}dQJWwO_M2?TYKObdrSz{4lcDY|<5}C$Uu{Q&TZiWnQ~2@3PXg#3wq!-Q z(jQg8CISo;>x$i#kU%*|LJ!RdOaB0X3WE4;Zg?fTwKgxploAtyo9)3rr?bfflDp4t zg7}w$e4&i<$mlNr^RRc+&~@Z<*UTlznSsh7bm3W{{H4to9Voe0IzVUVKn*@d*{N4i zLpg`4mWMt%k-?Fc+ci4z%X0i5;b6YW-qdczg}Dr;))Ld5f-)|AffD66lq7!xWgPbG zSB_}kG_c^gWPeIz0yj&6x0w-Zo{z7O8@~=S0(aN2ts%ZdA3B(G6E99?>iRBvNa>gs2&GUke z2xqwe2RQi;IF zpf3=yA(>lM1~`hlQL&k%YDQAH-FyPLM$Z4|bdCxBA>39dzRuu-kAKK`YUycCL~>1|RY09PNTkkbI8$xai0(DJCV>MkO{0cnQ& z7aI8wGXnH`7cW+IMAsh*WICXG+M&-`4O-O!xk&#C_32M`_#33*+AV$Me$x$&3ypw| zBTjQ$Y2txsBs^24W19L_6*G5UYSdz;o^7n%9mD$3{{Scdfn0uJKN{(S#=qL`51^5p zLTrNs`Fo&yq9aAScz;wu6r>%bp#dxo4uF2b26<1+sC)_^_(k7sJ+YS6B*20_9Ycp* zAo+hz=Q8{s!qtOnoX%$1vp)}84F&5AjGDU>#_*i1$p1hOWVgYIKahC+^Vi3OnEUO6qM-fXLJ36WP3APgi3f}RW>C4poLTc}+MLc2UD6Fit@ zPU8bPq_oNoA4VV=wjG|Q;Ln~T>va<8ZlE*HtHx^6!GBW!2QBT)-zxgA=s$%> zfwV?E&O@x;7a-f<_%;^*yFPx8(93jjxfvuwgUhBL0S`A?_rc@pX z93_|ITX@!c>2Hio(j59fdXJarc7J0PfwMv2=+j=cD(QE14nbu7fxywPy}6sGSr{B+ z8jLW9VwGCaIz5-};S61?_WrACm*R>Kx8KwCs8C_&b!T*)Wv|CN@X)9-y88c$yeS-> zx@Tlf80;T4en&Mo*|_TTm&1qA}ekyDg~HbYS} zy^gBfKh*p?74)@35_lpsL_Ppo7NL1Vl9^T&nGC%pneUnb0LfumKc&xsFuylBDOLjK z0l^&a@s(%@B`Yo`XtmG9GSY1mDU`~7K%sM2(e7_=EjR{Uls&v$KnIGyq$1Y-$;=^C zNPY>FV4$#&2k>zqlE;%iU%UtIok5ZFueSgkL!B5jtP1k~jijORH!xk>?Mxt*XArkk zEpi5|SAZpTb;KX*g$jA9vU;!e2l{^^A{xTqhzLm9FYpXf`M=y8kkMZuIfVI_p9AUE zUxAiz+ix%p`TPqLL5bAkvQ8XS-GRa){%=QGf4I5i2a=lvPtqJUt_dv z`Nb0upS&bj$NX#9e3<}k$B6XwlzyMUZ!%%QJclwsuz2B_mbSoi$*+0Sz==OEJk|wx zi%|!AH0KGF3WIkwHw(hNiqz+LH1wwZ0P!;g?OgPV2eo`l%P@z8qm5~rG%$~yO8K%ol3}1@!ruojs-R6kY}mf zPS9QMG&~zQRfejov~+a$pLB%pAOg+%Ln7uvEMBNj=DbhxHz6-33pkm{A;6;x1HQ!r z7;gEk-?AD#2O6zD?cbiEMMKmsvopj2A5+10^#2z0_DD%!OA-+w{!fgiSE~j4O&}B^qS-07tJFPcX 
z+<*I<+CUI>DrT@hB1xqHGUb5qejJ*S`So)y4D?XRxBViuw2iMpJTSdIN(GW#!T6sBjq2?md^!zbz$`7U&xM(&D zjp|@bl-Zpw7#&~^N}I+C)k??Qzz`}b(6}i6k;~NMoqDPHH$L~kT=V2-8kG{@RwT7? zPi%BBCW#a%8Qb-&c0M})PIUmvRC~b1pAEq&@VcdT57Pt)Yh&0ai1)?e8b>V{2mH09 z;Rmt%8!+HH4m9an*xG&yK~jFHwB`2w#nDDDxc9YpR;}$aEd~xY<8NEpC?SjB zv~yR{dZscOG*rxe(e70%r!uOT^?85W0fYFHrH$ zM{i>KZ){4gPtFz4=t?k!%@?<_08Ma=H1hY#az}iAwuiqlY6y#%TIc9EQ=R?oImLY zTq;&;)hSN1E+QAJH+2!ao=f(q?^%!KH7l1YbU5NJ1@to9X*PRiD_43m#aEMltepZC zhCcI8cM?jYWp_BGicL<8ySbROVMk52os!ldq3g;M6VWm3G+js82{ZV;$CdlR8;6 z7Hjomo?YL*oUS)69J3vPY>$<$^zM6J?>Y3-?P#y&8c*=)C$rhkE4maZzR2Rpp|8B$ zU@xEXpdufyc%@tzebJzf9Ry}PUgX}SR`q$<&VrU*pH%lWMy9qWTZ$Sd15MY-!Gk2U)j5maP#S|Fn%4izh7sBZIrRJ ztvm7UI_-hx{ww2b`XYrU_+aN9Dfhww@?3#pOOuPi zLgR0T_!BplJo(xc!E9yMxRdgal|b2Dn$KN8(QKupONpAA3vEy;b@EEht7aCm{nd#n zoty3PZ2jUS#ZC`SERXg@SXB)}9f6xhscQA+(Ry{IeAP08w&Y^;7BD`eo%%uXwfe*> zpO!rRn;*7{4LM_9zb~}3m;a2KSY+%;V4#;RzVc2{yv8&djb|u#8@Eumh>T;aE*VZK zzEb}1P%BG7!*C0|sCgn!OL9h|o})#Pk#O}eY50cHGGhtcLuBBbvkQ<(apc zvOBGWghl#nycnHN+kfZ)G((S}Rm%Aa`VT!+Ym`gjb^vt@5@1NNVzbo(xcPZFB!boX zDA{7|od{^ur|UUf^>ur^a1GociU)m8xby{PMoV=C8us1lzKZ!{+wV@5v6u~|YbkHM zeuP~&5Bf4y_cOPCT}^>O@>>p|v!1I}pi_~$%TkGB`tsg|3BftF;uWeJzU0aNqVA8{ zm3t&=ps5++;AUPf=c1<)xAXR@6Rr>M>e!dLaC6^ppdj(6xefS zb^5F7=8H=nhb!x|S^V)5CFw4CH!ACFC4moA9i+tOUVY_k8CDH0SKltY2BIItpEJfB z_fIbNWJU3Us+s;!dYvW@_G#O@rfk#FifsKXB>O9=2LH3UZ~HRQFN66WTNbzR^MRp` zqNf6SWQBWtpRVt137}{&`($8v*#*knIRxuOrXY8RGyPzA^p=+`Ytg=w5zli4vln^a zt<^-WoA{bc;C2fkXB`P6qUHLq@UV*{W{pSM$Sj9~`yrjz)oEp5fOEgL{f?S0AEfzC z-+QiFnSOqdUsjH1?~}6F$d7W)m(o>8 z>?@~nF3?V9@Antd9+UC$D{vIg5~|5Y=A3*|pN)60i;`2M!c~Y_J7d+k5jAN!sw?`a zdH6g-XSoH-nj++wU#?(GR=`B%83;C}FQa5NSMyVAcGe|88``h#ilr*+nYGZeDrnEg zBF*xil7`W$`A_4~SD*OgK`+upA09PEzN(t}DBSwACtk=}snzl{z8kHDcV6lX9}en0 z?_{re5KhTv@&k_RFQ4R_dVP7vXnpGy9zn#dm?QgvPMUy9M^l?=`*CjSqzavCkIsRg zYxJ3}ms)D=)qQnS)VY(fN_W=jjisBlx!%jK_wYwr!Z*7McrP7(O7f+6YdM}pIv6GN z-SOXlnz2;6GugXI-Q6R7mz-C>@vKmNus242=ZR^WqltKXBVv5weg<+#ubOUcOU=oi 
zYqQh&pgvI(xFy~wMoRWVD(7}_qkJ`&QZ89}eNJb7Rc6NvmhAOa+=Azgb=^_?)$aDs zg^QL@!_ic_1dOq7{$TzImkrRrweSxW#Zgfcg(fq-+wt!<4t3_U9YaJ$nhlq44+X-1 z7RD?ls59=XKf-zeVccnS;6ve*(1&)RT53x*dzv-+b zl))v2P&(=PT7UALkN4|=x!r|@g_Mut-}ut7c8}*%=4VspFMjN+i=wL*m6?%VEtZKH zZg+N-VMmS3JUU%g+i8Zc`sUI-%U1AGgx7;G-e)X*3H5XtQaBvl!*QIanb2PE#4<-} zz1}gg_cRq79ZlH%fQvRx6*>yQjp(w^8r78kxh zSi3cFd`o}2mzyURzBw?sT~}#aHsdDl#B(md@PjEfp`*)+V*w$EH5f2uqWuP|GjW_@rI zw{bFQ=GNdcR)0vqq2E$iV>29ROm9Y%+!4E{LgjLB(M_f_=;5rsn7r|LjitI>;O>-U z{L%d)E8WLPJvKzFGphI@hbiO81DE|@Ntn`@urSeX+cx`Mb_GFWc10Ua&WOATg(n449xd**hW&it#zD=j+Fx*bb)h1y)za zY|QwB`5c+Y$4(xUBwr3}ca0tjcsIhK#M7ySH68F2Yxm>gcSn+Sk4{di!E)P{k!{b~ zYA&p%Kupz?)kCC6pGZdH)jHiyys=+&x~o^Hq&X8EG)A-G;urGDQo zid4o(`Tj}=7;rfpic2k8_7M#nNDwpY!93!1dnen-KoNneeY$$q8snYP=zf#~H)Nl! zZ0hg7ScdEc^JcPCv-x9O)WeAn@80Ph8K-N#WNLKWkTcdjK3uJgh`>OE#Exp|t6|l8 zw2s5eOJmgvw&ku#82j+pG8@Os?Y#~Uw(|2o_oekz#;~3*7a1#i@J9Yk<{&?lErX_{ z?z`q!eh86hupg6{pNF9lxYtHGxy>dfVqw1hq|GzVwCc{U8-G!W*BOD%8d%7}8c*~2 zajBs->Vciim_+i@atje&qx}lW_S82KrwqRIrTyI$$G%$0O7kNZg+UE+7cDMWopIKW zxK%rqL%rom>s!N-^{%c~BMLSccDfRqg!MjmoG+%IXq5@SIE^65$U`s(_pS+9Si)TP z7fSof-@p@MV3C+qVO8SaVx});h|QW&2OvaZwCc^4-O$QEp7o7Z;PFLJi%1nAI7?-r z6DNzMldu+lO-P4vdsZW0ZrCo&-LRLZuld!ge}+yu|C4^54?N8|SJa8CX^CR+zGw&k zq)9c|zARZhZINs>@nTYD5PJ*RYL5&GCYeTqrK<7_i4&1Q*3v)RXwO>g2y zjfywty2^zp-aYKaA;(re*c+cgyWD0KVT{_FvEa3C?r^-CU#Z`4;bB?a^lh0U&qD0v zXrL?QwfU=vEsAHBClMnd9w!<1Fg6{>-m(TsrbSytm>b?%s+EWu!XXb!6U$3N^sL6_ zM~BD54xI};g;oOKMYWg?XoT7{F);R+Z5mw3Rc{3yNe$%(v7j%@d{W%D}E&P*}0;C*tDAR(By zJX7qmI)#0_n8q0F{Axq-7~U_?&~Bsl(rkM$H2T+Tt<-1UwJA3_(RLV23GA=tJ4I=e zMHE?l!-?gdib|dIvsc(}op#*ijbyhF{+2v88KdE;@Xh2!9n9PO{YpFy$IZ|0cdZ~p zL|#jMixG|cD7_oKA_4voOF!ZuD-hr*%t^FilE+${BM|h!JoxhIxt6TC^^du_v8<4! 
zR7S1Z&n(Mm37YEa>U4U8i;KD+Rwt?hxMKIL+Pob2_#oIeJ3R?ZrHbrDT6HYr1uD|^ z6=h(&Z6^bZEpnJ02x?VNqjO)B%gD&!^*oY?r$KQht)*UBheLFi=W6y$5)_)WcfCHZ zx2W|XZ{S;kL$p3iN5`w2f9Vj)?{!Io=H@_5wjziMxq|%)=Gx?mpuExMCd%;kL03L}{VR;I$VKX;mSI9%Lrk{)_A6YAb%%*KraLh|D;XTR(J793r~6N?^`hE`R*N3k$Hufs=d z13jTW+#}`cn;A0M%CT6S)Z$TTqr{^Xq>v(S_L{{`7nH{Kvv238KVUFgP-%*faC=>pOUdXlfUMv}EPh0w-0S)vjs8W_iyN zM}^wDy^7?Ere7vgGky>0E6JuvU&@Bp9WEatDsV}2fgE%<2q-{Y$?6@J%cq!L3knJP zd++h7oD2!B)w9#s`ZYRj5821HFVWZOWPHMRbc#b{V&P&=KZ053KW&~uiY;Bp17SlC zdZp>8T>Z^FL$q3^mU)B>xDPN390mJ9YsM~sB=udbUMl*qdeJAOp8qikvVfsqR*&b zQ^@9FHobvBZ#=z-;mXS$x^w+Gjwx!1Sg~}vhW!UuwQpE#W*hYjw-;t}wCN8Yu?VW9 zzGQsPHIy7FWHd!~HZ^l%@i|IZCr2COSwkn@UafvIHFf831(tjBfy*l89P`yZl+T1M zB(!MU6XGoPE8Iyyh)?&uC z?=|;53`jmrJnhftD%YRzZlZ2XT*$aHXL1j-rANMeUXGPkZ0cjAnfk2C9(M1Tb?(PY zM6`vAHPdZ8*3<#kgu=Opu!v;U-c}QeQW{OcrHblsMf7TUpTB>9fSrmFPvqydLDVtI z>=awTpm{&*)M^|vg)?m()L(`P-7iHt$~i%UmvCjFxz}SqC$-x9@zvNXON$p`@!wb-E_fK zKb$P2t}4{&&wSK`An5N`VKQ3YbR}BNDkNlj;>YU4YVeW_p&Szp4z5V8G}Vj%x6j<% zT)EOrA^KET9J#x@dlaS6} z9svQh)_kZa4?GKi40Ef~b?U0&=6=s>%?4tqqs9+QFn0K0j1yK@jTFeaxDs_fKbECO z6Il1%sTEki=UO~44qpTeW%}M|DoTcl zf9UHtjLx5zGnCr~MY&bosA%7Hf}%L}rd+VFu=duu7#PpKflOmQsJW7}oE;y^+VP{t z$J45JqCMG}lt0P9@n};|Prq*TbNM-}W@S|b!2~rcWO0s!biQ*i_}^FZL!NVTa<=o^ zYl4oEnc{;h5J-sPH7GbJeY#A9y_}G^1Cvk(MUN$rgu^PV%T9Yz{u23f~ zN^_WNMTtio5IDQ(p(}Mg#OF{A{jT+h0Jp26h;h*nQpDie3Wtwrm+oImOfFSeitJoRBN3w%%@m;r=kpRhy+RX-w#dYYB@qE4*N*)U92EJBRCZbUe@@Vf_#efy*YbJYlPU{7Xwychos1I^ebL;s=| zKZ{PAgib%pXP=oXHVsRBZK<5QpI-H2L*wo!2HKB>+5oDM$QrvQ04lj0uh}%%3`zHq zgi}bpb=n!2y*2G5F=(PRZzg1E^olNKLY-P!19&|0)7u{_P~_5hl0L(mQb(baR1b+I!sbrYaIwd= z&uYbT_5O6r(4cv|1w27;P(+4xvg@1qN8AR??B)v+jopaP5_Y< zL|{*9HdzpEF&F=i4NOMtbKJl_LB)ETxza6nNEge-mnvmG$A3%4!+*nfv)Pxhj-?(P zbg9%KeiFE|B0y*q&+Ls>tlgwg8N?EqJkw1228{#GJb#PLG3kOo=zBFDA_zY{l24!d z`fIde9_T0<(Ztc(m@q88=hExr;yyZ|EF; zm)CKE$QWw)zJJN($fJt(V;%asOH{siT7%n%mzW6zC0Z&~DoLRKBsc9 zzM&@Pw^a9&39hr|!*NLL`}d3Ln#VaX;N@ z{>$K(;ZHV?A|%UV_u-EcrHrKfUc#c4n7&hGHgvBnaohuty!w_1DTV_To!~WcO&M+? 
zn$}JE`bK-hnYX^LI-PY~qp~&OBe~AQr0%^&KNaMABZ4_d$Re7oNFUO)-HG-NJ9ups znnMmE+%LD;El5cpC&AZpj+??X{akY2wslE;Xk5%tI2}- z1(+WXwD+?d7D`-&I5>WQ)4zBUB&O+0cSS9$QkS}bgD)Ahpmayth!caC_fGBO$YOZX z^PjLb8$?(W=AIw963R+MP=6kOKkAx<-`)Q__QL6-y=nOibUY_jGP5Z3Qzo7kB6r>U z@q-B=ig?GTr#YWKJ0QG2j76Og@ zT%Dkh_fxH2>R+%q#zhpgFSFf*C7?08qOsh+sTJsjN!WXE%SS0y!IV;~NEG{JiG@I_ zPotxoY`(#j;9Kpjp{IcnBu1QwFCixi7msa5+;rpH#s@%-%RYboX(}58oBT(*P=GQS z?YGyqd+MT$hV&7pnb#Lb=rpCE{13rS-V*XC5RJS=4n`|BZKZOpxDyt(XVB4YqK!3U zVjxXGzV?B=V;%WA-?B#iuPlPnuPnlr8Q)yB!TSoPpm-)6Wsbf`|$oD6aZX?r|)MTEQGbybNgRhnsv`75BGgl7@re~13 zv8n7i?o_vGPN;{VyQL}<-V!oX%zKvPk#z>e%rqa>)KK_{Wpz1Sxuh?x@twV54UQX6 zpEFmdzd7=qz(0D|c9Sog{D3zZ_MPNz2Yw6-)hWG~4%(W#=AFpdxf{~QXRKa!!APpC zW@pY+jCw{di15bfnw_QZ4XL2>R$QPIKjPGNQcA0P9NgoXnNIbY$|Kbd7lURvpz3Hh zJRaAJ1oy#ke$`d%jBR6Dh)4E4wUp7bAJ@X8JRzYI(i4Rek?=hC>7S0c>4{tmb2oXn z;2?@_HgUbbJ=$gE)vW6KmcF#YKe0J=EQBgNUZzoMqMV<#W>mioGex2wbJE+PXa68xuE^>obbjIY ziWKINU2LZ^Y9FqzAx9C3N6sfh;wKnc^2wXh1ep3Y(jM~%$A;zm`zEwE1vqOH3s|Qwm7e`js6aAV z8jPg9#vD;NzY;i&4oP!Ohq*>8kCIg!m3@b~|1nvHy+FkBsA1``2(qFyYQ*<7W*f`J z9*m>cr~8uH^%XlsMm#lp28<#7-F!4|Z{V1u#A26pWvucvsj)6kyIiFjRa1!ZNbxfn zP9Z-ltaew&U%wO6rhp86@X{+Yh(7>Ed}Z2=(J+W;z26AUM+|+nW@^rIapBWMD*qF{ zycXmt+X;y;Rjv9J+*LH5Z)$#=P#PHEFf5wYzlD82Tdh~Ueffl#9p$=5xxC>5aX5}- zkuH0|$dIkoW&iU|0L5ug=)v!UDXHrEh>GStsD&AlYl^~I2*PGK#0t$8wf3hoT~lN> zTYj+0*`oHO_`L!|zUl&8$~4F0hEH^XbsiMafv9Q}sun!$KO$?d_}KiA&6>NI*G|x! 
zK+2!TuuB8R;p@ZaoRKWm`;ZCrleKQqsVqYw#IZ_g-OF3(G$1*wVB#f8 z==BV>^Cw0g-UN|;^F)a2mdyOl#PT!(%ID>;%=e@8F6KXZrrL*ADXCd>e?@#gP`t`+ zNh_4~@$0O^1Deeue!ElK;C*V{9*}$S^A`)ldn|L=?(gsaOsnrFzCLP=5~Wluwq)|O zB-{xKp)zuYXt#=`Rn>T{z2SG80&bUGiA0(A3{+I&G=##@cA)8^L$&rYBKiYJuH3p` zWU9ksTNFBfSXfxzf?7)Ni7+N>1qteR+K>ZIml#j=_f6yKLH$o(&bYnO!w5K7oKAQ$ zJ+GR~LxFE+)bh!qfV_J8zB8_Akx~^b_x-itX@^~FXKxGs^~{7$$7GSZh-}iP&v>}uc~3xT=1@0io{-h-b@jw-yxwU` zcx!7*il6_kMc+od&&$4%RG@BJENKg z&klI66w%Ip?j})O{fO9eov0lewx5haKob^g0=2&di{YnS-0*l`jtI0c99b370t>ju z#_N{D?Jp?kQf(?R(9xB~CKbBk70FMr$b8#KPcLNicP}@#VWHG6k!=H4c8=wIZK$!g z-!h+@I^PsjsNE_)7X<9`RGBuDLau@#l35VawoTdLeFw}V(JPpJ{;@JgO%?V1+uL?y zjT7Ry5A};QD*RD`Tcw(&^2_2?rlkX8vmLzAy}d*Xcjq^p7LY+%yq|_kahkL7PZ+^G zk9yf3#r=ET)S|+lj;ICC9frDDvGcKfrhEa%{%!_q8Yr(|52 zfv=g#TRYkurNGsEM>Xss(!DjbPO>B8w z16w-XJPE!2>${tit2DN{aXW>^Q(q3{W)CN2h!v!)S0{r@;(g z79&z&1orW*+Xk}2x;Z>zjn$m^eTuBk(?LzH@WZt`$d|R&_K40{HuHLknfliZH!nRB zYAvrw(TGf%n;CUpq%BluHsf#de7wg`h&xuK9rq-2+;B9yqr<$rny;r`Dwa;}+bDIp z^Ucd5&3i_Dy`RiOD`KCn83y(DuI`4Q-3@eAG}WKt!eqK!+w`A&X1x-$?YJ|C95A`u z6p!X7*wN2bSX&ZLRfI3((3vk#bPw_83}B4b_h{F8G)c|dip*g29IcZ`sqeL{V9e}3 zG7I*HnS*U~{Nmp&pXog75=O_`9hu{h-8UEd_4EB#gIYRKZ`cDgJfuP!Lo18oVwo)& zqej~BKd6?z&U{Dp*{Z{YnD3htpJ=>%)WBGwHi<0lOU1y=kIKh$+*^1GoJW4Fw2m zN1ghRgMk#D0@!wN3WgHXF@X_tQ|GXTGtFu`pgec}L};h|oPI6-1s}U?evhM(AGP>n5h=bALLP zmL(k>o!vtHUZ%LUn^y0EB=&G-d$;djP>ua@b{N%49>&os1%s~K1u{4WhaYBdZ>~@w z6jT^>ngXjraR?BUw~}yK8EfAx#w2BBv*9CZ*RS4Kce)tx`3mfe<#(tpA2@(~`1eGS z8x+Q}jlK_Ay989a!<SS_Yu!VKD!b!V4~o4Q&Um~f%CEAO4xL`lFBN^ zsi6d}$+x6@1rKps?_awq=Rt)0=!=s=&qJ`on1tFexItOChAgGI{H9tRN?$%azLHw2Yw>t67 zdKXIp(+7?UP|l2KF1~BPbT}`uQY}a9pq0>``py#)z|5ry_%u5RtoRa+t=U;0o}J}p ziiKY#FlTz;=Zj(T6Db8`yeK(KqIg_(A!F}hC%>V& zpATi|cLaygLx?`D%);R_MquiJ@nWi7FNcSRdtSkjy;|)}kiQ^o8yu8ZFjz0}a{Gxx zMkc@LeZ|0_UM@;Rq8Ab9-P(k_6GS4fb);Wwy6o-yn#)%}t9zK^hdjm0x5{rb6d{)c zq@;4bu6kSpwReO>hp?6geTd^iP0br8Yf;ba`pV18^MnGBc(2O*{Rz`rQ&=zziEGv3 zBF|ooFy9j zkNL>L8+Cv41>JrFYxN@0iZCzwk_YSSQ!D%awil7l$>#*Jn1%&j5n!wkN5I#;a7bBo 
zKF66v_H~*|ucj8*IB;?gU0eVe&Elu*7Gn}8bAcNu-u;5GyU5=1gd~7>jz~0bcCb%L zf}a;e^VsJFe0Lq}pQs>ZqGqyBnLBS?69#P=Ra)@t9=2;FY$h=2=D;D2;f~I5m(>0_ z38K!2o&?1>v3+b|urE_GyMT`?1AS>AF*%rKp1VfIV&dsb&0fe`Ba;c!giAQ%wy#3? zbiGO%?0s&|maV7@Cc2Fsk55q8%~E0$d3XmqTBT$ zOEfbL-JixFTj6MI3xdTz7Lm}pp5wna?wzCwLD|q7r8=RFuiFkF^7DspgrBOoO^|xw zTJvCa!xTv{R&NiT?HC)?z z6LN+%kr?aJKc16Z)$Tjz9Q>qnjOt&M1hXUucWXuztVSh}b~w!RZf;n$$R}*gf@On| zFBC>iFvYx@1mzGB?@T7ML;L$%jp18Pk0oQ6j_U;_W3+FYFI{X7KayEg73p+w<}$JB z4LWIyc@GkGZ1!KrOHnoc@JvHJzPZ?uI*y@=%lFgeNfl!`MymN@CvVliM11rkZXe&Z zy0q~&ti=CuX|mFD+ z;9$}DL9|v>YhG!0$y+8s_MlI`W2M8qZLF@+vV((i{s9fwrx;Ez_v1jtYGXA{Vr-UI zmARO$RR<~@HtuoS(z>r#oZ~zeZO?piIq>`T&;6<2<`73(SSDlteBCma%fmVSQR@fi zE6vifD9ton_frHpvKNxKDada!StkZ^J%jjh?5$h7`vVwx?d)eiX-gkIdX&fT-%GtN(N`i{rRE7j^RPXeZNHfDuHjZP9U4o3-GOSIuUV{$nZBImP)&3@Yd;G3pP z@qHWL=-QAvgGbNVWL8ZXlco2~wS*4)^kFH?7i~s)YNX=mM`}?Oc*9XUuf-#G5;IZ` zo z$*KrVF454BT%5@V_=m`Sd{TOUz3k$$Zggv}#$m@QpBA;iuZJ)^5=+-`h>6VbagDql z&7-UB2*uAL;Z$s6OIi0KSX-JsQWo@9DkpV&#h6~uD3@&V5Hn|LBNd z%cC^~Lt)xNp6wXhPo!_uykV12!ErQkSow=@lR4>qTCHuD0+pD|dRJWM`v)JAgbT^~ zI!=v_2(!48(^QlZ$(}q3(0%LcKP#7-$vRm&I=#c0r?SSB=kh_`Y4V;}^LL%~jc@7I zKgOckeCH~|09xE)2MLYMoqmucyd^F z(}dIaY-%9U44q(fh9wXAq>XIqA+DfEBC8-cGiu8^@2BCRy@?ql2oVyh3_!Jzkue}w z%!52e#EzL!Lxg45$B8A;I#Dgv(GGXZ9m^T8y|NMHvH!|J(*yA0|MNZ#ABD> z<@I1xNg2A1-49M`Xi|5~!@Tb07ecRk6#WyOkc!AjsAynK9Nq{Itmw;(h>YjDAFcCB zR*k_W70A%~d-?lQsKK@eQVQ}=1Q26}j5B{D!oe|6*80}*g^S8_4_2`dtb6d(GbO)=qj^eVORz`nTLEo z#P{kCp*|e8+ha+=*WdR?#!~oBQ1mjXuO%xvl_k{#;uk%GcYi@+yw6wI6ADo1VyJE%3s?ssK)USC3A1=j3 zMMXihM3YjNYY>dtEw`-&$_NNy#D|;N17@d4`8;TLM2LLN&m-*|j~|NhnOe87b_Mx9 zw`2Hx%F}lGJqzD=Xs+r9xHN&XLTob)iwJAlGbyTB;$Yu&19?A{AduL8`Zb-rtFM;B z&>wfj8eGLX+$Xc}dziE4m8soh(=)7lo%O)_MH5$7@nML#FYR*sRMLFCv$P+9JAW!4 z3~_xQ^eoNpq;+%zo)L0j{zCDsZSDi7-mf3O@!N1qJ|9MvG89iwZ?fd+qB=mnNc8(C zv2Igr!oce5eA@ToaCe11>Hj0^FQejWn`m9Q@!$lv01566!67(78h3Yh2o~HexI^Ra z?g4^ZaCZyA-Qin#-o3vw#yP)f#%NjVo~x?nyecChSvD9658KgJxO}_$`wph3rV`RZ z^1=X~DO2Haf>NSMNp;0aY9V7@gn}WH5uO>7^0Z7MgFG#}tWRK2Buf18(i;Di(BekM 
za*JkH@pE`*^~vw5X5fRbYR7M2Tu}sB1v6aQL&LeBtxNWWNQN-Ls|<(l_v2a&VT(5o(Oavx#dSJUH0itAm6ezPqik zYGK)8?)&}CQ7=N&df3SEototKh2ZJ?EEzgr&;=uh$l{(xjdE4XxFWR8G@#`>!O|>R z3Zq+{O(m^O>gNrKfZo!RswgKeeh(X$Xa%N!I+fa7Ev@G$sSGWNul1trOrvfN zH2Qn7tK8c7ijx~LVjXT>E?_{7E zIUYkn8oFR6vvv&`BQmO5rJULs(MBeP4A7a(KO0AF1M_#@%GyL$gnU2ruPz>m3eS*xnL~fimn<68&z*E zQVD0LZLi5^syCCsx_60uEqRHV2=4;g9R@Bo?eh27oKgb=Kr|q>A%NbKkfWmsp6J{C z1MFtmqiFACqbaHMGe(63^kwps`XPieeoM#2-aTBV#3q9Z>jhzcaRgg0DhObplaV*6 zb7@1!;C1CPxNw-7J#>lLX~2g)sP`wrh|<=cgCwS)97s-stw<;Gpd;XHa3gg;;nj8g z*CHhI64W6*Vj!W+8XS5tC}kmmR*MZ{6=@{@f}aJMRcs6%ltZgQ0c>Tjch@b_W4{< zv3S3$P$~^K8jpU@_qBlT6lolm*eYe%sq9(;7|2BZq-X-Y;A7xkK^$ApC2E!Fx)3xf zJ;?pgSv_dYVFQ%-%HbE#l5#1?`d8uM^@N5d>=G#18a2=dKCztGqaEN(_%JfO<1|=Z zX*8xeh8bJ_j_)5JBToNz&-1t9#C@OzqnrD>@YX$`_7GSF#P}G0mJZlh8fZmbKynuy zhU1XWj6$U6-}m-%DI#LU&WS0??|F!@6NQSPGsDc0$OGGWRxSI^MbK5goNzJ{hacgn zkW>;R>g-j=U+{;$I{g35cZL6%@0<#VS0>tR;&YuP6<85@T~e7j?Wa=Js*UWLGGUSQ zI-R27J<&}`O`l6_h<|nf-i*}r41yXU(nKAN#JSOI>+gFosRt4498ajPH&ZCeBgJd< zI-veVIX#Vl#GrMqP#xEgoHA7qJ|i}dwm%RP!Nr69otuHc7m~)MCO4l|q3L$Tp55{`xe8su}*8g<~qqsV#3YNfU^FQD` zrT(SJ61d~UVN&@a(2QevwkDMSNnCbv>odI8${;oyOlbPL{tkciJ5u?p8N7P;^1tp= zd!xQGjH%m&aZWi+wzmAdyxH>=YXua}l)s*2PqubTs}pU7oMvHp66qdTam*7c8woU? zo{^a?G8{jXkWNm#ApFfO=`dU~%J^Xm)LoNj{37L_4#ET*;9h-vU*X5^SfTJsW;q?- zbt+$rDsHG5j3!dLc<+%ErlP!Rmb<$w$uB2&$eN(*kroH}KJTNApPF{*IW_mWSFaWj z>FebkQ8P{7>2E-qkvkO5a7Gp2gy`k%@m_@+jIoDG|2MST(XCgT>5-ioj1BIh5f?8@ zEF%Bv&gMFJ_Uj`^_+Bp@TBXn#rkBTX^tPz7*yoCirhomL5V~syq3~q)bFVTg5>r~1 zw1dD#UL;@Vgvvw95;D?vuN(LqtHn6={0F`izkB=65-k} zDito$Wg7XII81tCi77BdDV6yxhQ^5$G(;0RDOF>!dIq4aVNvSE4aK%m?5&7I1_{s& z-9mY8#aFw&IF?n)K3lf2ch5VvOkcHSl?P>a6}8;Wir*c`mZlT9l3*ln5hNT}}z6WMr&;j|YjZBZUf59^)! 
zG;9oNz(uqmqX0J#n_0PAvwm*|Mj4B91U?3~%0LH~#pcVW%GD0J zCH|@r_p?`fm-tqA!N)xUsQx4-y+j#oTG^X2)&Hy)M($On6uX;17?gSOtS|?k!8k#A>cqlsumdvULL=P7+PmG z3%lgQPMY+)yFb!d+T8VCa*_(Z;v^#E;ty`)gi|%I`?ak@Ebl%eV`uglH0!7RQM~hX zJ(NSNX~4rCvwO+c*QaPMv8XNn*MqlzX{jdAI_-mn`NG?TcVqeo`F!2v*71?!_SAzk zBR0O$>0*1fOTyJkxv(zt82s5~-8;S6HA6`f{dtCq%WH=!LgNQ*Jo9)aQ?F3Hm!~G_ zm83M*ljk%p7ed3sqT^N-Uhvm?o)P%H5jra$WwRls~)PqzE_ zS8+}H$CsZ$pOe1+@zbog9JZVJ*5$Rvi@hEkK3a2idj3#-cpiM!kTPD~JAy+uc6&i? z^{6R6y&^<|s)mSPme}9D6xCkFxE3ll5D2PT2^M&aFcG->{X|gF7B=pGYyP96;@u%& z!q)WJJl=@1xf3gGku0 zl`y#lI#va+7F{uG(vQ`Yl2980!TkmG{7yamk3PPR+ZO>WI*nB@d{uUB)~dsI(9l*l zcLB^FzrSdkWcU-7Hp~cuKSF$d^1#<0w9v!pzWrD872o*P1A||C`#q8YZVWJf)nv;6 zj8@+b!6p(msCDZx#0$gE$j%{7I}^2O?}_u2(ITh3Z#x?{rRSM9whjDA!i$AKOoaH( z{UuJcz@~gzd$4|A-_Cf6P&F z1x2U}$ghPPrnj#XaR>dHM;0C~3HBBpg#HLW2y5j7exF>u)S~LGtyMOX{_jvV=lI`HRpItEUz1yp z3!R9NYO~SGCZ2)LtbomJ5{>2_!MPuvyK{p*MIBTr|5mlB>Atg>5O3e2pq8VH^%1%ct+>TrMk{Yy@ zGV=jZ+D(;^gKS^D*;c@y`M`pIDVPzotJW!DN;k0CpY#1Wt;ufbqp#13HW?y%eaDce9&$#!bQeF_@HQfWv@yl z_{sCu;0$r4Y@PBF`s#qWpmavbV|KreD&rdc{EpQ-xnVx+n2lgXrNsd%0{=rfZ|H6t zU<)W<*|)0Q6qOgwuSB+CYsYiWAFq^6Q%X#E1AU0^E-Uh!7%|II$8x;K}PXEDrD zuOa>x{Jw$Gl2*Bf&uVEXFtJP^L#goA^^ee`pm8G#8<4NKZ;9o6WV62KXmi>n4sg@l zV?zCd438psmFbZ!T6I5uNI&L!LlHM?naA9zzfDg0%0Z75@nIVF7!EG>R6w@RD@|Mu z_gGVW*qG|~?OX;YV|u>83x8>rIbP(GAIf4F3b5Fo0RgH-7V~R=D}=%!AylN1(}2!)_o7d5m&!x; z(k?mUM1Kf2yGqD}0v;hY2uldmF9GA-qvKcRin1fJ%QZgDI0Nl zI||+4PNq(A`PO(!3$tU3_wHt!mp4S$sLI2yk0Y*H9cG^Y^)Z;2AfX^-JMv7;C!;3}&sZIWvT= zrL(E(Cgi{3BKe1=5$!a^1E;}|45-W->(Nu`FxJMG#h=RLoW32h=|iRA7_lbQWo$&P zt&m+{>NS3q>j#^Fa9#Yja4u7AW)Fcrcpbi+gNOfT!9geZlnIJ?Z|- zeN|H!85<0vuHtHB^!)7yL6xK_iNEln)0A97Fjv<7>(mW4xr}*4?p*)JVk2RHuy?f= z=O2(d6iS#?LE{pi1@?n#9gZOOo4cm!_0aS{Y>DWz<^Ox z77c&pJEGhI^viF}k@5Qqnkypm1WvsLQW=8FD4IhZIg8-nDrql3iGeFTn!$)ZzdrH+ ziK!YCs-s5uO=zrM?_L}S*vDO;4;$B7Ke4c${8)M38N(vw`TUG|*2rU;{cy8t4%1Nq z>{ni#C15wB`Aw<-%KX%xHy?-r#vMefp?h0{zTDajuiTkf*!!QYapDLHXyy4?l&P@# zI>J#h_x+0xTxW)%l8|i-bSgAh^>qDDpm+|` 
zZXe<9E*y?V8QURbDgn7hL)BgqEU{{GXfGHaY+pagpkx2yBpn&@qzW_}3Y}&l6JKiZ z3FbfXA+T)W-g=!hBvMdN$n`9DJ+VAXMZTLg9*oiSM)A85NyEFNw5!{$aw9gh9qTY4dsDVy}o>A}%(E1h|P|c8%?KFEx*n@v3SbLRrC?Fk^$L ze$1yOr$_nR<(?eE7O>JhLT++ujGpR!n)U`rxj_6L%NJTjSc8NxQ|Z2)0nWlHD~ zj@rqERB?iwp8L>tEp?TwM@Jpa0|PEwBMzV||2B8n1?^e$e?MqRUa?-jf}b>4HqNvC z`dtvpn_)x0!SsK{{lCnq)<3r>^+}kCuU9gouK2|cR#FIyM_R1`ymRecGA5u`->}c; zAzN)kEq<-yqZxNTksZOvbrvHg37Gx@RWWDvj>bC@Ct`f8wOcBMWv}&NG!$?Fi?B!Q z0#u(Xfg~9=$LeKPs)`O2N$6vS{%*Bq^AEkGfl3;$o4>AwLQvHNOD!$$Zb_E-*Xz}8 z0Wo4S9&vM&-9ig-b`W`V@w{T3;juucX>mT#BYba@EyXyD?bT&e1+q;;p%^ihj3)f5 z-E{gRK@9)kgd0WT_wpbqUT&f#BAVR0{AjziC)UREav5jPt$g;g(n^CJKM!lNAS*cI zcjYfkd%%eF@`##ywiLgz$|G&_to1{(lF6Bl=G(Umw`EqJ;JS_KuB;6HgYpeU3%=b+ zecxiSW`np@-aK>>+6g^vE+;D8WqaO<9F8hkRQQZ)Lr+^B-g6x1JuXobR1M71mh!na z;SkKV@%jZZ9&7(ENne$ZSh$2Pa^5uPZO?w$Qd4t_@2EPPA&K;>6ZI=SLmuLk zWyL{9tL0z*t?76fFCsu_wN3QSw{x<-l1U)Yd#F<#q5NT%2JWU*7e%iV^!#`91Y>m2lU-$6Vy*ZIh*V|K(o0Q!<5lf`UUoY@* z7jg8-JXTP*#5~8YY=@rrRAiG7rf_>up(tP0b$ZBXoBS}6E_tWvm$b5_;($^i9X8xq zU983~x4}5nWJ685!A7c^p{mH{|$9FFpFfght-P0-CeCMC#(BNXS8s zj_E)^eM$Dajxs=;R7pT7k2j}klfa4k+d5}@8Wk*U>*m9yu zStlh5f0t$Mjagwd^$D6#1rz0!k&r|zhZQYk<>G5fsp~F238nQB7MupeLyRKJtOBpX z9;&`)|U^LQo1I3hOYBYN=*y0c5+SOx zw-AIFVNi8I*$g8If}{F&SAdPtG-XhjiE|n(t*97i30vT1D4LLPuq^zQw2KP7jUS?_ z2ecdq9h^yuk5V1xuOQsj{l7i9%XOZPK%?`XvFYUUtM(Ooc%pIYX|YLP-t)nEJUiImUWphPE_V}vkVGgzeF zO4d~(=X!XZOYas?zhFXGHePNEdHFyRO};EmN#H9yY8-(8(8H5H9SGpr<%a z7@Ofi3M~z3WTY=i)<8;OK3yqV03?#Dy97sFO|#Js6}4rILO{H;2l5xC#f1-E0e14y z8wz}r88&!xh%M8sFM`g3LV^t%f^tplNkuttwQ(>+6*m*ip6)$RK&E*ILINk~<@AYH zR1}pUs1F-U{u5_Lh&KK};U7HBCB;+a7TkeBC}NGICu~T@@vC+S+a4FjfCqzbqA@Zy z2d`vSZ7tY7dpu*=N|Dm|a`m129rqIM zs@Zwb)ifZe%lPmZ;an$*u#t`(Iqc|v0 zg>n79kUueB*(p}b(<{P8_<`XY7QEqMe~0fBxl32Ba1?)p+LtEizwoZ9L5cWo^g$8z__g%o}BT3~zNhs9=(Ph82hea2y^V|aeilS5bO3-KDFgO!;F`h9u<%vBjlJx4E6b zo3g396f0yy`WBS#W+ic_Su?2tq5aQlm9`NL>4B<@LPo<4ZTAZ{ze9A)D9B8tdq`J1-LO-k)di%N{@6C7{ zn_|h8+?%gzfH^`Dgn%1KBE+-2#sjW~4M{qNGg!6c(`zDBAX#n%wkm=nG)eV&gIY*%g! 
z`OO=gvEti6`09jCtL!r+Lnht2eZG=BESxs?2}J1?E2|GWtF4h7eKPy{_-b-^Smu5b zVW+7gc+@6l1+vI(0M?Im)V6Wquhgqu&pTnuN&a+qYny7JAb7j}fNQ(amM~wnUP8xh zzr_%Fs{&Uc#`67aYFK$!BIbkmoORmZ8Q`&)_ybe>G5se{S8<&z#oDnsC|g$&%@%qd zf9o0-lxcN(AxiJ|#>q+)WT8FZLTE6MA?`dE=JdWv&f;KOck0@aD_Y!N>%{WWX(1yr zf0YiE&&spB!Yxz7V>Q9xtlwGM<#uD@I^L(9MS~)rh$bz!8cmCP)aCz!Fd+Iz6a8*} zwp$y)^Oa|GBTDbVKR?f6ax4Fn^~`hH^*&K`WJ{+%Fpq#QESm1kYVWUlVw-CgVN9Q4 zPmkre1~pn0%1raWeay2PhiuW|*X&alT9R3pqXa^2johMhg|_bhHY6Ngg&(zY6G0(4 z8YE+rtniU#_+S!+S@HE`8GC8`SAvGMX#0!TU~K*!K@Kdk*$)HWK)BYA0(IKzWf~PF z&{(gr9_oApf=%d|0+J;=xi+@@h33g;!gKte>_zoPHr3rkdc@n^W_~Y~I$eE2`R%N7 zAg|ITWf&I%T7~Pgt(gaDaFyN@bf)le>vlYR7}w}lTvE`h!ob?p!V{FCVLlabPUam< z&oREM5q#bhA-&{XF09yHXq2}l9LJ_OVf~+k+Oz`_>gn?_hglv0j+G!tTABb8wgLo4 zhK7G09lvI9!6Bgf31phevd2^dRzoKFM9KuJ{VuEoB$riMsdi7^CQO*1I2}|43aYgl zBOnzfCKjX-j;Wh)HE^G0xj>-~w)B0GPF;$NxHMtI<}Dw0i(K1>W*(IIJ|ds{!8k3V zqrHdlKSE<@c6#2p;9LOb=roCetXia)E=>wZ7B&jvx<=i6Vxa*K2PFsa4kcS4GuB_# z82mkP{G?KGv5$eRWZTP{)u>A^IRZyP^po5an%V?hEYNVFCyw$ zZtpKz@Vbo#k#3~9CSe=zH|0Exma=!MY)5h2Z~V#;>K0%oVBs>jZ0S@~b9v!8MaW@H z!k&1ztB`^zg+=VZAA%oCIQs+;6EMJMdkYk;3{|Sj7f_RuB&V%)P9Joz-H#XJ@N$m> z0`flMvMl2!!Z5&mLvkR93XGKrdYOS{1L6^MrV@HF%%;C>W=j<5Fd|ZV>Hs6@NDKVd z@^LO~Rp1Q+8=Ddh=E!;cZ6M5SFd>u5Vat)$I>~9iA0H1&xC`h~wLD$F<|pIX?r=g9 z9cUz%i3Y|2kVJfJYzkqfuD44ilHzUhsP#^-(WNp34goNj1bGQYXt=6}wd&U*0abB+ z_f)q8RbMO6!T+yu7;Brf@?~|(F&E{}a@%*yBq35bni5rBI0|S;3Q-j$h=9kXycl<6 z^e-bRB~6JtlXbHQJlqM-!S162pMJR-W1P#$GACRj4k9lU0d@nY{9Ng&jMKj_Q%B%D z_SO#>`&qJmqm=$FQjYH07>U6F9c4y&b`&VOj)xp?{pSQnCaI)=IP+HobYew+c@6W` zAgRX=C2?4sjEr;<_M%DE-dh?ZGd%3;8$=`HC#;c2LN<5bkL020T)#$q$-x*^$)WH; z;kUnGg&V2ljv@s=J8af~u;S2fI(98DRKny~wCXcakhY1h{|W{9{J7a9wwd<}fkRbu z4F-Tf5jfVg$)!-kw-4rfw4c0FgSU4U^z}crJni|B#)!;s^CTYi40_(5f8T_V-7*hWF1Y|9%;MfnwO@13PAkMOX_~P$z^AL z`C6wI;R3)1yj6sL38}y#=EWKrle=tahb2k*{}VWc4Kc6%3f0?L67e43(uoYAnz`A* zOT0-c(T+=pbxrnMVzU4u5VdBcw%HbssgVnG;4Nb-+*Msg;$2IP^{#tH#`7G8#1n|w z0ykr#-nSdm*3@?gD*p>sKRw>V{pZBon7_3W-&?ba_?`@d=ayBFl&>s~Dg2X3d2B({ 
z7O-xLT${bRi+jMn0&0~$hQnd0Qc)%}qW+R8K_ON$dkfzDM05x1LPDkjEd2U5G0&jJ zl%gcGEHu_@>c|&cHS1gx+JECTIugyn#PX#A?1JLWU}D(8|Klil>{MrP~ltmQa+)r@g zTEB)Ymg^id!pMIUIOM4_d+slps~%>|+vg6|;K$waJYjrvGinH1!TPyq=|??qc966! zr(NT07Svr<*7?smA>8OhdkK_joXf6wn;$kRT7KuMJVj1=sAw8R;cy$HnO1S+aKm`9 zagH=mNqn?kqatKCO)rz*b26p%Tr}`}QJrxOG|x6a{py7f5^>XYc{$oZ#hIozTF5jw zdbRgy|JB_lUbBVFaF(U)!b3d!zANk>(9jDv zw!N2=KgO6_B2MnjYP&1#b&f;Wq>)Us@HPZdzyEJzZ%Dk(G7T8aI%jkQ^`Mo&>;DI&Wd9= zmo+hIU4bR1#ikAA%KfJQwGT;(xZ3va5$Ud9y<8H)EsyzM3Xx9xA`Vb)$%WHbP2l*m zlmA8aYODioK8NJ9<6@8r*om&KmH5t3$+)~NInO%kFihsH)v_ku`PX$x&BB_1YyO++ z_1|j-sQ<5P`16+Mh`aAjwLNhXuQ(t8Yyg8jMBQIwM6(PKjg&b3$GD|JQhvaPq5k#v zgufw_{Rw3dFpRb72{G>}kSqA43g5ue8O|;#=TGX=U=96aSmnz8m@5x z%Inli!yW3D5{>)R(-k1bA(>3fI4UHG2xgxSVcyx6*!Jl18Sv{&Ld+su9l6gs1Rk@}h*ho=BLHL?x@_7jL_zwp)eNlhbZUAxdiOyfSV#{H zzqIEVpZ#RiiO&9w&yRg8B%26(d6N(io>OWyr{(a&TQMB=F3{@hvPik?B%_S7KaxShu>_KVt>;fHCILG5C6q$w3cm{w3Ss#?JHTYY|*}V?!W}7#LRB06t`}iax|TykcT}rjqer4C9fm+-xLbd48U`og}%Ma(-m= zaM=sC>g2LhjA^v*GL#=|^y^{!|66~pPb8MsTk54t-#~-)w6xedKhb972LtG&Wybrb zvp3_D%KSzA`cDYLZT*_g@91o69%O@!wJvQaZO~~-Tm^`UxYN5zp3{^zvVEaM0}eqM zupx~WOF__q@C0fr&1Yo8RiO*^I$xNb?_?~~IBc!TO7y!8|HVsiL4<59@=G;#?9t&- zSbGqZl;rF@!tO9x+@WEJfy{`FOh&WR(wh5*N5=kyoDr#Z#@tPuAyAUJF)CL_aru0! 
z;23yiG(6t`7OwW?mlfhNM8z8a{Cj|-Vv5lN%R3?GVN=T3@a=*v{WGmNg+eBVI1XJh zmZP?RawYE!y3jLCgsa8mY=QSE7-tx0ar$R4S==g?{CRN5Yk7}K{t^8iO;&0DGy3nz z2R|7SbCYY6aAh!^3Q$I%Xt5?|`&@YJTiu(>+z`_XPx1=dsZ@z`D#jrElcSJ|Mr@%k zFFFG5N0^>>Fr2c(Pcq23F=_7=aLO|_KWfII@8yngJ5F&ROZ{jrghNgtSsEIEXTGq> z7~K?va`|@my-+8?jd9q$!9!1_x^Di)VSWqsTBa)HaDH5ScdnrvMAj*ZmW!LUqQMb| zB_eVnzuw`I@JTX8wUzwV+_(d`G9x_i&=Q%3a+qUZ%t+w2XXporG}3WJg3B&4f<=_r zfjKjoHS6_ij9o^XRdz~44e7b%8*4hRa~>SUka`6i@`A1Iz^|`*`%7FY>_roK9?on@ zPWbeb(m4Y$-JZuL%>QN}HBD1*9Ke&;_|bHFX{gur@3qy9v_F9v0N#i!5$8qCZdt+* zF}%#vTB&%-%f7o%X6HSl2EdCj{ORU*YVd$x(ctoEf_%fX{+h4x{prOPDLkvhX_7x6 zT5^qun?*;rcZ@?uyX{M&rhpe+=bJpqdv}T-3Bn@BcmIcvl%WgyRz-%@vgw!2nbO_O ziu(ty=+a_5m{pDAKdXj8n;;pQ=@(GMtyU}|V#Cg?W#PACEl%1pFYdlFt%W{nEb5tZ zLS`#pHb*G@rnU4elY{T0Q*obGnoW@}jp?E!sQzmLHXc1>2-KUOE=rT{7ll{wS9eRa z5BAg0#YjEdoMW_IT`_;^M&I%K=O}UH@iSI;7YhTE1<7pHfw2@^?t9^^MunyA_>A*t zH(+8*`Q_RZb#4G9{JFR*;H+1B zmpD3P=&`M=CN%{p({iszuKA@2)#?inAXKkTrIS+w1{t`aIMKo)1Yj_ynK^8Pvfsyx zB!D zT?7a9N4&RBkS88lb|@=o99+~V5VP^sw(+V&Sfr%;iz%*y;2TZPeA6N{+{Gt}u2-v- z1QGW9rx@V0x5Z`Jz8NC~t~-r{GhfdWGC8%v9S6cwY6t64f9aorb$r(A^E1#$R$GEy zFW$86i*DH1vjI&Va+1joqo}4Eb*7`3f&>0gY7LxJiGxZ1&xxLu<(P}>QhyML;C6gf52xolA< zo=%nt)y(=;h=tWrAK61I%4zzA&F25-bSJPsKrD*c-Vsp2mXVDpWHS|d??~c6MKiRL z2qj(+9D`B-AN`kZ>1#?w0pf+U48Dy05tkXQq4nE$11siAEw}ee!^%4)E@?`i?C3EF ziKSPXPt+oyj){0i?z205R+x}bNyyXJnw(L+6rM=?acm>aJ_NJr^aPMjRm{@T?GY-5 z!?Eo%=j~El{fe94tJmQI9!3UZ>zy4St=(9~QO2b9n%RN3K-{GHEbj-HooxGbfGXwr zadfQ&Y3IfBgqKk)o!Oak@z&oeN~(d~Arwe5otR~d5sl3s4b0nDyPj?j3#PVxj9?y> z$>+D%&M_Dt8_S~&H|tM3Sy7`ufLI7)efIuC^S~+vZXz68Tc;_?M%<&6Wcc?s)J}^k zm1P$Q{d7PVQO67I-C&tw7Dcg1@rt2Y#0p}8P^M!xfwSZiW&d;(cKtq=i26FKLY{8d zouq>gww|rYb0sm&0NMZTuDhEyx0FD*Y^|F!$zE5~J}L2kr@1TN0@=m$1Xcr`Qw+`2 zKZBw!1v4X@j^}P>Jlfn47s_Q~j)zCEFFSCFO{)&l(_>Mlte9$v3A2Vdkk8H)IpG(-Or&HlwK$9wUsWS>0@Iu<*T~VT z6^r{IMQ~>8&z>HoGdI(_1`0s``I8+egFg~tY!f7W70X08hWZy3_U>HX0~%dz z9;kWr++8>sjo(jQ_U%451E1w8xhfBleSBdvkRFe&W_@4a$n_|a*@v8EFuW0E~ zu`<0y!`WPl>|9NGv4f$`#msafCXQKSM`ctdJd%+w)EcUxUWN%o_B8Yv<=t0OLSe%z 
zc8jZv9hv0!4pa+s$fBL-rv|OTtSPMmf77z-J<_M@e3jTL`Mkwwr^_z!5;qSGRdf_l zh<}&}N_J)U2!5YwQRlJ|9;qNArqjU~EsIckuL_3m&O}AJ#OXILu8WIP@G}aP$WpfGkX(-9s*Bh8gqIT_JyCK;p%oi>Tt&3#3F|eJGZg{- z_mLI<6YoXAA9ojuFJ`+}{j?A=s$Q0lR73^=0GzliactK+$dua%G{YioZ=&@xn!bo& zt~MC~j8qk1c-!LII3L3=9meLz3NhLa8^&q_pF{ni%;49gDPAF(4@82@CT3<<$Qlv$ z5P-wK2=$m!a7+5Jwp0Zfp;PTdS4hT^$nY>GTBsm+u+dl))csni#lsH5M44r|sQ}>p zuyZu2Ky`?sGh|Q(P<`(F$}46ke2M*jx8*er&NZ#m5%ZjS-soEX3)lGV2y8L{-3eb) z^=;tZ2Nx&l!YKfo?;ZWiT%BKGh#W>S@;bdrRiS6g5mY&KgmM>0Pk>|sz5MspB&-3f&~JJ?GK+FAn}F49W38_K8Iv9 zI?tx9OfCfP^+h2!LdXpC9{N7E`jFVIwy=u8;oJiM7FGB)iA|xy*>wOYCf--!txiu* zW2*yTg-IonryULXbdKR{E49&~-_{x$spIG#y9X3rRd|;Xxk0o--1yO@9s&=|CSMN| z791J>6F+rye%gPG*_=f3xXu&&ZSDZH5Bzk54-Gb!?~`E`s@B1!KQ5e@JFh=Qha(w6 zrbe9{+k5YpG&&tcuXfed_p>YQg=S1vlI2!tZDuv5CiW{wgO?#viO_fxx{&ow*&Shu zVm9%%Uh!kngn@qf+`UmXdd7cbX-cX==}K+*`s4~o(7g$zC_F*6UZ=b+AFz=MI#t;XORZOdRsQoLc>W)T^s5R?(E2_F$y^0OJ zqd^_!96=um+8Af6mo~}})w@#xVbQ=|v9$h3rKS3Vb^tqaorqiO(-qjEawfRTE3pN< zhwL@CsClsr98vhns9baXZxdGPt8ET}`yPXl&!@^TvABGuwILUVj?@HG%`v?u7a2=KU4{X-;-<=lO$bJu6(H8akrSJs}((Vow$td{jblo zHo+-1o2@-A>ItrDUWbf*+&|b+cj~sFV7$!5WL`T*rdvB_kE2GQ z>i~nrHfr;_sF9vwpa}A;=f<-h5uOh1`nPluXczmg?NH!4Bw0B-Maz;`h&u5<860a2{J~fJ3zV`I)}4d{%=3A(~X&HGKgOsDy^M z92YA|5moIXnZD;O!Cs6iD%mq(Yo8H9J`jFq%n>^=W{Z7o%ky z7%6r;gd54!hMbO<cs@NIgDz-|4@sAQh^7&IlBBQ!Q(B97^`h`D|(g2d%>!J|P!lgl4ZZXY= ztXdoGZ3bd6>1V*}F>u{Q!sO@9lumD-E&dIUNZ}uC&!s8`xD<R~joTA4{$;fC;Njb)5uRY%&t+LsNUXkpZNT9r5Fmo(U6$5*; z)aAg`IWkb}=@!x2eYz#|#U%Lgws;=(Z$wMJf95-+JoMA`r{LtXH3jNL?^R%ApJeAB zV9>3_3`wYmF1 z?AF8029Lxp+xB>sT8sBu{NAaVw(kK!LJlr8r#S>;2IXz`rK=lZ+I}|j;W7IDT!@VBT`PBm&1#2yJH344L`H|#5WLYfg3RS zNHxQ($01a5fg>0d5Qc&@0wS?jZVzg=5T;@&IdKfetIUH;6;SDj8cpLBoe)tf9^_Oc zP6J_BxCk-U6Q1bdJ)Kr3TE?M~V6e`v9Vu`F$U(ORUf~_o9eaLzyguCB+RD`)q`5GR zX;<{B;{R_25`26DKps}^tgnKy4+g!@^{gMhcTQTJX7s*2OJ2K5Je6I=~{ z6U-CBZ+K&D0^#Yh_9bM)3)}eSE5(2SjU4ai3{Gnazkz63HymVDu}t*1xHxP=LMxSV zGZJi9D3T@Xm{<60Y%m7sMBBcT(hciQPq;b;|M9~C@ppHfaQp+x2^@0gUpFbZh9|dzsR!&z+5M)vD)nKmENDj|`P>o!iKzbNHW{mm|CmlQ{SXyqTQw 
z*{^j8inv_=3Af=)ZS*E-FD^}*e7%1`hqvprsMo#YTKj|R>Q;9Ca?&Z!YWTqA{!^UG56LIa zKg!S6S}sssk4?Ux9SA_UT^n+LZ4VkO^-0FihL*fnZa_igg8C#eD9fyXjCA{FJ-jpU zxJX@voAG~%C5lTMTG(uU$8`96*oaGl!cEo-ml z%BrTg!$5ZE?w}eA#B0}S77JPRm|Nh=4&CeTEVlGmWQg0%;tbInx`_{4K_VVs7Fj7u zgM|nAc$lUGRx;P-;)(+DuT8X?XE>7zS=4F5rKH`hqV-<%} zw7DI3#H-fniCgi{J`+=?O#Uyh%J%f$T;2S-g4Cv7u5FQL(F4}vd2@MtH-_uS#; zEq3YO9*+?dwpA$^3^h52cS_@l^gn0GA+Hl6>7jz`=4kW&aaKB*0B+c2YZ1dPsMKQ%X?txivimK!WIJtpta76( zvWtYkN$5)#C6=V(<18&>WK>T5k9`L8k38gkP4;HW$EmCgS8Z4XC1N7#KB1QDcj=t! zOJ+@$a>4Z)34W?n2qf%^7(t>K=xeU^L5JI~%yM{rv!}87wxk0ZbF2mzO6jyMTn=qK zy5${J+b($qTs_EIbL6`(e)&$k7xWBbBB?OnSNhpu4?@0>ClYRrgbBW5&k@=c`6 z(cd0(ZeK{!!Ir&CIQVUEUa|~cY3#K94U(a+m={T7&yqMLo`(pkj^|9OEaP@i3$~3- z@$NQ*jLe&DMT6OVEk-wg9oXhUnpqsCO-2i9bl4xg5%Cl;BCuJk%lQr%Ro0r_vZ8a|HJCsGHUE|NX_N8;!{a&rGF(-ai2ocvsFSx)7gzUrwv0eC%tH7 zMGPz5PL>DQCDezY9pCR#Wql5`0%>MAGMb;rE!hdz%k%C6Sg`rJhsXZ{7pY{vY zRpQV)jiv-EnBqdAY31*`FRjN!VTlOnBpmOK1iY5Ho-0OLBd$!c!rJ&d;jbG_P3VyF zPVS{ds|stn60H#mJhrcInwEZ5stP=6WC&TDo5XmQzo^-A47O_F{WTc|g3eR%lpmS> z$8UyezB7kQ0*YfYs%)~GBf#zz*b}8-{dnQ+fNv(`bQlae9_H3axANiZ|#rLA1qPpo~eRVpW z0!Uw*nS73<^Fh#$CxcV>_1;pnD=o@Y#P9TKlf^-ZSQ@6B+v(i4#j_h_YSqH}h^AaE z`rmow8MPOOw3UmZ^U&Ek?F)DwXIe#>FD@=WlJn?y@pF8`$AbG{xJRRh33owKXOLS* zr;V69Hd9}0%dZl!U#io}wASIe5>~2Roc;}6cjH{N_H2cd=+9}E*R@nfY_@=NhcZ^D z*-W4Q-7!t#1G2Tb)2VKo3uSZ;i>RpRiv6%t=mXaWl%FRLEY+QLwdRODgvnTswarOOYi$d-VsX{284@6$)cBmRXv}s&$a3 zo9j=%r>4))D;E-I^VxW}U!@py$ypPpYC=K`lSFf0HuyM5xpFxD|D)?IqvBeEHPGOL zdk9W|z#u__1{>Vn-CaTm?(PsEKyY^tlHl&{?(Po3$=l?dbMJd=-S=yznPGN!b#;AJ zU3Oj#J^FydAwo{OSY3{04{n{m`kYhGySH8jfm8S_SlE;<3=5-hMrxqhC{4BKmUXt< zU3B@QBCSKYQT|FKeae-qB=K?C-OKgrWv|1P zRmNe=BC=EO?2CTTMQNbgJ&yqO_Ur_MyBB6|zsVAF8|11Ls>Qb)%_Mu>otqsfH|BMI zIHA0SQTRME1Xp7+>C`|Umx>%_rm2_p>3FCNw|JZ)ctKo_Zr0PtW9u=T?6c=}-Z6}C z|3+ujjZW!e!>?0`hnv9C#fu$0QWq5Tb;L(1oWo?xgPF9(+MJ1PNiOS|YpdBJ(L6U= zAaPaTO5r4x_Gk#;&`b{I&r>^nR7easn-I$$qG=vANzcPy$Jx3$)|V!;7k4-^ph^9- zq{GXD;`E6OPst~4kGu}m^oi@%T*BJZHxj7DD!HnTWf`OBB)s{fmQ!jfHRXCK2P!lb 
z9qZT9Po^|BOO=aE?b@8SBm^tX{*$z79<536GKu*Y*2xh6jozQlEGmc{IoL#(TYaL~ z57%R{yuT3$6flhQx&7?1q(E8%Y-2Ht4p(p3QkWFpk&(2FS|~aaSQ{R~vc{iH7_sB#r!Y-U zKSb+L_az+)sH-JRE*i(rDx}Kd1bhKnf4xQ4N?L6yLRM_s$f_er7X$a0-T8*rCj^Jl^C0jlem6u<&4T9J~uoy%{reuJC; zJU=*vsfxlakbfyKKYsedr=(L5=X^(mmFF%uSe!Vfe14HP{fE*{SAW^zWv&Y$po zMUnB73sJ1tyx(Id$19_k7sb=>qFni*qgtf6F?o>OnhH937HP-fQGI&ROr?MXC^g4T zz|!+;%~`ak)myJ=Xvzx(==&=N#vW3bPRf}5I<;U7)tok>T;s5TcXA0-@QYKKBMwF< zEyqcFS1)J!hRrfAt$qfm6e;h901-dKr3LLMnkdjl`__b-HPQt$e;-xb>P8@v4-V*L z>5E0D1yGAT99-viZAq)Q^4#5Bkl|&SSS;EqpxM=n($K-U;fPR0KeeY{jQ@vPF5Q}P z^A7Dxs>8?*QV*_49x}SZnAY)4PlG(QRJJV>b~;nmb@)aaw{tNY-GwNn!kOGTj2=fb zTAr-V$BAhI%;V}WD^y6yr?DgD-eB*;R+i7G9VoTbvfi)PTf9$}J_g&9-mq%del8b} zNU!m|`~V&CisGyofUmU@K`z5l`<<)Po(G7U*?i+ors7!m>th=Np4I3>PN(DPlHVCjHLjkQ-LG6f zE>h`{8qk)19>-Aht55%4Mxb<^9RL1MfYPfoo3KLRgjF#jtel(GXevIq7j)hpYTV-NT)PevM{d}!bZn&6x zO2aNA6-RFvw_I<}as9}(<&P~KLGY0ue6hSiq?*7~O6MEQoJ{Gaf4J)^Fm#3N6WsJka5*r1VRFYhw_#v&6hBjO} z#7#1mcCqh$H}{l-`Rm{P&0?%SMQv3~_CGo9H4DxwyEs-PIh?dfP!LfF5LxoHB0Qb` zoK>r0mh=mLq8d=Oh0F zEJ_J+`&tG()0wi0;FQ@dFjcS!aj*l& zqpJ5y@~A)em4ZYy$NT(TARhk*-DoF56rk=a(rP^jEE6ZcYM~2h(c5AyxNstz4@UY< zLr!n~MAzphXd%0lqh0UyzOkr9#YS5zhG3IglAmT(V!Ou0m<~xcjdDnER4;#_&hDXB zw*S%p9yLdX@E5_|W{BC&!L=019|{Ac8f)go=0=f2D&?~L_ocFD6EKEO_{S6pK{DN% zwF01+%|Q7)TD{_A&B=H$#Z0*@pX%H%SA^?BHuW^a@r(I5de=T~W8D3=%SQFq#%ia9 z#@$!%A58NUkY}J^g0B_W2Cf@9nC0VBcw%I5cm$h+p8NB^L(i@5{wo?w$5XOEK}&A{U;i~~E57Qj&VYa3F)p0RkcX!?fWE3w=nTml?J8CkLo zb^yyhdq{Kq>)8B0vSFLq?|e^8u>gunIUVPh+wTppJ(kNder&)&%&>jW8E@ZHW~%(X`AiwzwkP2 z>2Vymu%RaCI3B}I9TFJWxHxkpbTFH90Czz+Zy?&x;p1Ce$Wj_D!EtDHq>vWt4(V*VnuJcVl;sX6RtwL-cU>(dR-qV z&`y%F)GbACttiD(Q;1+p2}VmwOIk8x=gv>*6XczHHHjN_bsvVu_$hHA-VQI|y#tuI zJxM~5Y{sB^a3E_GXx*+PaRSCD9!fS_sIizT)#&q|u=Gdif~}D&p3v_^Wy~QiB2JRw z0^s?hy{SJ@P^9I4YSDe7jFg}RC>Mq^`q*ZT@V{CB-27zGXvVQy{WjsA*4DK^#XKs) zTnsV72r>(&gDHt^M-wki6OVmnY&b_paeG}1Vt)NNSvBvg05GT{J(aPrME-3rB_7K> z^j#hn)#FHr-_9LeRTe96NL2$2{yU)0RM-^M1nmO;J4qah6j$jVh$tJO@skpP4Fs_z 
zcD2Rfn}DH0<^gh%{5H{#52#J1+dR=R#7Qx5MKGfj`$IP$qrda(ev_rf9N;7p$cCc9 z7)9ii_%ez3g{h1o{xCus0DmdpQc6TC@eUSjD(^fNY<-@0NC!7OW&z$yqXx3dryL!~ zcnoneZ}k8G%Ft17<$s4obucWeCh=k+CfCzTl!VB=VJZUB&|`wp-umtynlulWRN zspYlp#kX=j{W-Fqg|&sT;1pV2mm@hjmxRut!o6CNCG4JN7lj3e z-X`9}x4xsin_a@%8q9z|){PiAm#BZJUQZE!{1TAaLYtPf_I{JO;CdC*aPVuWQ%uwc zP(M6~p{S7V??JA$o`)@q;y|@`W?C6Ad$d|>>ja!sZ_(Zl-gunrIJbXYY!HkrdY|Wj)`jbS-yjn>0g!XqTs5nTX3Va z)5Bb|mEJ(W9vVxD;9>$c=IBeCDX4a!+=D}#)AB-!Zi3JzPi{rCd`fQsrfwJREX4JM` z6>5$o@hV^_>8fq(xPF$IDx4PwJL+*%848g)QpzM#Dvtv@uXxO*SA4thvpDSL5LNm1 zyS#WQWgJ$Xkd6{n6wU*hg|TwoS6^uyo(4C(<3#I4@d`*z37Kl(v;5&v%VtgfsCA&x z_S<0^mETviN9DU(3caM+40W6|Sc=OM#Cz9;wn4Nl_6G!9RbL9iY%2|4iA0|NLdsaN zi~FAdP(8y!3U)&ppqznfFBgjRKEV4m%gTUlu}IB z>~<)O|8o#}S(E$Zf;*zx^2&22+YhO6GfA_VgSY-qU!P{N{rds+F6MrX89Z@e0Tu&v z$~sYE8)M>mGooN;e15Ig<-+XmBhn{>2Z`l#R6V?=zPmd;^#X6(Ehh`q^6r&w9SuHBEhmZnP24}Oi zIBW^`C1S%J7pG!^By?5yFX{EFzEMRa6&nuG`T%8X3vAWA3MPUh_QYH-JWD$!U=-K! zR0s<%O&`~IlRwy}O8;K4gPA#n5yQVWcxpM)T5R-qqFm|XuTZ!!q{O^qNu>DJxtdBzk=b;uT)$}8 zmwbNnk;PUaD_UaN?E`*tBs9&2)O=eM*B+n8+9Gc~sg2q8$ySAHgU;#CD1}NXmbkQq z9nNq?AI%6CJ?T#$@GW#7oUR(+LQyDNfvhO-bt8(PwY6s}-}hY=>K|vU_>h=iMBJvi zna5w1Q5-u6K9CYazHfl8h4&?|PoxTTPhxs79_*NYrpQ@U=C;j*>E zZqHTb26C`lm);>|ljy)<1~f-P=iD+I1#-cikY4vjs07Fv-?rG2x-p7?I6(VwjZOtp zwL<9>dz%e>&EWXJG5;jce3Jvsfp+~ju3fux!V2xyXWF9>2bb#;odN0|buJ8f0b$2k z*A(!c3`&#ZFlUEg2BQHvlAsJKVfMsJ?~kMpANH<7A`PueB@y0{=s3dDg2}|YXQ_DE zya2<+dB=rfgoi4_W zB(;5rIg9{@JT}}J1TcG7+EvgA2wr35^9Vf0j!1v@Jl_Zun2DLiSU|j{&}Sg-W1Gti zF3xvIl#}Q5vu8W0LLtQmZs*)zK<|qvPM09uEKi^H#5xI*(&dF$s0+BqHM=lNGr_4Q5|yhjL> z=S80&PX?Q}oS3*@vBgE$-|W!<7uH#j$20$8l7TqWz)w2MxcGLby-&&`A`u+YJW|#D zeVZYa5$=e5#(te*dg_n^v{AxBo6}WN!4Le&<%N-FcbB`U_P_6&%&I&2gNP~GK(~a@ z(l}SOn8)Nc@T+nht$=1jvcH}JjLFvSKYtnyecKeVmd#l?2}m;9x125)98Tu=Ca_tjnpi?yo>U97|9K;K9fw+1J09IC zXxJ~4J+sy3J=B-^2#rk|$tf^x#)(08rn_)^YdWNW>pGdPgP7@PsxscCRadk9?zHt! 
zDEjL6iJEiu3k>%E7ezk6WED z?399!#LP{ko{xZ!0cA8xNX&e?_*7Zj|6Wf%$oY78EeN$^XmJgr+rqeO394OgRjv-` zA>epCBj-WsDKc%suC%0v@vausk?Sy=JX0XYhj<6l?fJ+>CG)NC(}>nKvT-(9+=D3_ zQSQ`Sc6X@2v}jF@k^xH!ifr6lxas6E1{~!+XX|5K+&>a!8*yNNna{5fcN!FpW#qOW zSTn)nD~j9tD{oA{=2eqJJyVEF+NYouKq^*a=1~HLz{HZl4#Mp*K2IT0@-{dai3PMGAysCbL|sAo9=47LW`sYC%et>qnXYS z54mn$x7&c8t2fe(9LMr@4Ii>10xZ^lhO73IO@GqJm@sow3tVSTAszX~&3&ED8j#)l zBZlF%OuE!%&xrhXD&|`iD&!~#HryX9TGOA!9LmDn4i$gabd@vZ`5Mznc)vv4|0a^Y zISp*;71%g}Qv#R$HW+X=TxQ?c?qw2}d&?)8#3WL>xX~jt_WAAMuQJxcipaiLDjUmW zc6Yz!hU1RAaODb9Y>gpK*%aRNm0@$jUFs-O#EX?T3-!DK7-7=O?vB|ThQ!(2(Qnnt z)Jw7nSW9L0WvtZV_u4iY6vk5CsI$GR3iUIeTzJo2IGPw*>{=8{oi|SbuV5Wn=e$0^ zW=V|$V;47Koqt>Jf}m9<;x57^+;UQuthoKH+Zgj+jmLJEUP}gh_r0Rsb{I}J-?3)# zkNHUjkESZmRVZ9f^J7J~WA8so25KZ2PzhzLxm9eK1Bv)QU7w{(qv+VIa)!t#HqS`X z3eTwPRq$QB9`9fsiMf(bLeWigwdlp;@w-lO=xU9RZ&*?FzRe}*d8}rp44hBZ!u-r_ zFylULse*n#`%}^cH^^)t=+vT+2|`bNfInLrO00oF<>}6-ElhWF5<6cgk~A?@q$q;x zv;|m_=@@W$NJ=BTyNvSFf?0OblT_Bc=;?NgM~W78hg~TV0mDlKK71ZX-bSZx3nLmoSMdl1WJfLouqArwUNU z+Ho3QHuL*J2Vo(wS2}XIoay_6@_PkD;8L>g;c5C1<^abS6q5hk?F5HarE54U{c;4^ z0c*+rTQtYcRK`AN`PoU1@o2tJOPu~)aIFQO04(i+uhd1eK|q8tJy`p!8x3egacHjk zJ7_zPKkz{xf>^ghc*GGfWz;YOE-=MS8vL=y#YxCA1avS5WWSO5c$iM$_ribMt35eMbXOMQ*N6Ot{23+zDC0m#iKBtA z5IWrNa&8^2Ef?yi8lVVPFkr}uYJ-ARA{HCsh{*VwM)!h&*q%PwltT*!D|~ny=4+)H z2%D(Tg`benx7n@d^fCC?6VQ&I_Hv?7SQzI}z9W4Q4-P{qr!JD*8>t&~yXylnL9Yn3 zr2w8v(2Xv5=m6~|f?Oo*&5iI>Q4W{h1JjP)}^gXqGc!wgO-Aip9CO!_VeatJN zrWlDwew|Gg;&CsAi-nhFjiglLGIM}-i0yF(WfFO~A%+PGYUo_RMY3+_EDmjEU zGG~J*@HH8$+WuT6JQN(#Dy_U~0UWKp>f9Utpfv_a$XI2#c{Z7DX#sExe!>Aif{qI` zQ193P%<7pkt=UgHs1b%;KURg?gWt*LPgQuA{sGGFOVp`BLDFi1>tQy2cx8O^Y!6ni zA{jDU;vPcFwV$fsoL-cnB90?b;>Tn!%e9n;q=jk zXfp3blSQO)+DT4F)+AN;ghp=F(V~&)kNvbKU7#LfJ3PtJo~KD6Bjv**eB1l=NwD?N zLs2e&-kw^q;p%j*dmos0gBT91Y3n2Xl)t9q#5&6mhWBp`uTA2!=O@TSMaNWyL!v+O zj$9?h*4yKeW9}=F3B;2ml5So?cjGV}nUlf}4h9bxc6}o9E>TWaP!;9@g zJ$;#ZG)6`zAQLGIE)7$l+cuub@V8I~WGS)38Jq9pJ?Dd_*WJA{)5=Tdo76?F;a_Qw ziVAcVca)31J)Lse69Z5wtQS?bEVM{ns=VyQqNyx-V@DO1z{RY??HDt1T-%H+kbgkY 
znr%#)uW*;Q+tnikTqGId;}Ckw&^V6VC<8OON)s}>c_#OVoowH~G|jFJ7+AgQY#qN= z`ZcVZJDoiDm0zE=v7|Q;ODFi;X}s^xNr48%lTL$?H7B_A;*8B6|r>jNAUuJHpw{{i>4S|AiA5Foj3B%gbEWwK) z(Y{yRZQ5{AdTzH$9njJL->9jLWGl0zuOF@uyAkRo_RJnToZUU&U?uq zyb}N;B`-5G-w-%HFE9|8hJ^3@3CM1KdVS365hrU-8r|k_8b)JuBpwRvOr~neh{M6r zM+&YK`S6sFO|2ipC(n}KFBYA?S+va$eR5MwC(<|`AcA-X9XRfN!b=bXn|jCH!5C#c z-rAb&1;Yd%PjIu@h_mrAn&1^63{geil&bUFibNKhozIbq3%7fBHC_f8_0AWKo5;il zg&}UtvIuZ3zXQ3yxFUncJ+g2TA(IGBb|CFm=cusjqr5n1OawvAqSTKlaILWk43n0} zosPK)qnFH)787rDxy`5?KYlP-vfiLvaM&uOMaD8ZKbi-Bb-6*iNR%}fT3FEKrSYXo z4D$0vMma#N+FH@G8PzU4QF(pZuY?qTYtQj%Le;z65K+rTMEedM;`t z{w;4>t7Gi<;{}SGNQyYvEU>u~b(ffd$;0pJa8cfm69p^_HRorTjUUHEFj!h~U}fY| z3xoxncfxC#&UV<00z>)qcz}yZj+-*sHTq9OiOf^^GK7uvb4!Qw)dd1XKcs$ue2hWW zUmETshk}MxG46*Bgg)p%R7=&z<>5PaisNBuvUQu?R3)-DGDTr=Av!TM%2d>tWaRpr zy41`AM_m1{!sy8slO)1ERNR3~FfoXS&@A1*l^V#h1QTPD4VzM%e8d>!dpKh8Z-d_m zorZ51qKkUt|4!=ro0RzvBg;9id|Zq4VrlZC(D*`XIl@iA?Bjg1U$b8>?^Cc>W%cFJ z@n@r{2@7mNxuS2$mJHkG9-=)7rAY)vl*hy13#gsd4ES?S!LU(8^EvKXE?qYrnZdsC zJ)>B&$tKf;wXc?^i>E|4D}${^7}eJ4R^IWZVs|<_3Mx*ZLzp%wOdK%d5SMe|+qYEu zdhn_r>4p_t+3p(`xtqnRhohTKcHnSR59;8EgLB+gSxU)k$gaSK8&{+IsnwzwQ&$O(w~1-@2QC zhQ#f(%$uBu0(%^jrg1NixwS~gw|yhGZQb+9AXwd}&ZH13Bxni`W&ejP2UEZN2Fb-eo5mK#dv)jk7s=ESvk(^GL|TBERa-As-Py9MH$=b zBXcJum*559l^lRqW+RWXysae01@z#`sVFkUuQ*6PrjC##gxt=rY3}nea3;xjb2SpY z{xvrXR)1KiL!GP2L;b)3t9&e`LwSMWssY8FL<$-h_bvCuMQ&m~S0ZF!=_H$x1E)Hf z!~Bj3!95wfW>;B+IB)nfbE;-;^QYcvz|XDW%AZo*j(<`fDfoj92~65c)XajkbR1_&)SJT( z%LnocOQ$KZOGqpVOtAT@a_L!6Hf{?v$eK`~Z)-jl6oN$$`>wU#wShrSMDI3-nZ_4Ma{Oy`6_ECO`KVgv z@~!KiuImjZ8wBlck?%9QS?)(40835Z0bG@-(yd;5y|K@YR zkQDuK5P?-+-8PJvn|1tB1q*JR#MX%HAtZ+p18)3l>!0Ufs$o58d$m&0v7!>)avVo< z?a@z>d3c;py&n&F%yZMKGSeBJ$Pi`sW;f27aW1<%m|r`VV6#@}q!FlA94XJsb5x{~OBmie1-UzFrOEZcVF7;DhW9Q^+xMtH zC{p`dlA~A_Yw|lZKBVNR+(whx`?-MN_|(dCUANBVRM?nvvYF|uQgyp&CczjAa6hGjSs zExiv#d8uD zMq#(2^_ig9%X;qj$?~G?wGad{RNhXXleG2j(q|mW5RRRVIA)1|ZEvz5N+=LgIOt1R zkP;{SBwy<(TWVbfA{*-*!xx@8Y zBAU$iBMpv%d~z>J{5TGjV;2y-N6;lz!sn+v(cow$WT3zy1}?D+28T4YVR1ssm0bF8 
z`XA;<#8Q0^AaUg*GT6ZAf(ILGd`!d&HcXf|CcbE3IZ<`|LImi~0jN-b`O~1`z>~vn zvtaCMm>3a2PNr;HmL3ldcL(a}Yk!QEID_(;t|qk9BxCcvGE~j$aE%Evi;VXVBR*L) zhXJzbuj5bVDvjdUtW+sL-H^g+2?R&R%X;JF9Ey`|rs+_#mTBJCH(yG<&pdo5;|(gQ zz8N#?Lsd07kY#BtKbSPE$eM-usgwHfzy~{G@yY-A;*u_DfBsduMIP;`8v({8e-Fv9 zN*05=a)h7Qz%E7tC-s8~5}WPHk~#a#S4b#hrNb`64%k16lX4veDalmCbgP{WVd?r1 zWEx^;vyZl=S~|MLd0=stQ%3eikauGG9>rfE9X+YfSa&E}IG%l-;4qZ@QYz+uO_Y7gr6E3jIVg4-h2$@CD z^giH%BQ)Cr>d4xY*z0i(iB)H3N_T3a9rDmUMk5^Z18wjgQt9cU)N2RbgR9+9FN~H$ zSK%8D8cPxE@n+HagzeZ)P4M~t2Cb}FftVD=W<@Ct7dL@h)KA;xcO9CAd_Fj~2fRSHcAzRC}#@)@Pe=kisE5k3P~>>-!a7_I(% zsPXKkL&lnw6Kw78mD>|ArEzNB!oby)VSk@|8mzUBVf5^0X%)&~rxBA_sis1vSllP> zHGf*WD=wuF5uos|J0;Sn4@Y~yEGc&aQ8dZj?fm8nmOI|SVKFejh#8`Y8YSh;BUYLj zOX7vhC;b4ZG`N$uq=!@R=GvyeDLKtODE_x*IC#%u-A)e7JJy1y8!&6&#r1>Y-|i`!7oY*2iehOZ@S zg&ZQ*M{l&uwZPTU$vA<{y}cEotOKa|AA-xp%0&V)26+dO^7|ok^`GcaPsYUZJ}Tk} z!PKmf;kv2Y$N^b_6cCM0s@Gu5Q8_Y%ANJ0^@tPZE(Q!LcX zfmXrWF<5oEl(DbET=_Q!Bu^waJCiH)%mo`pqoM)>&cxk`VRm*) zhqcVB)>M{U&*bA{NNSpY&do}cYgENq++z;-4LTjOOW$c?60?BX_k;-}gtl z=``q3d$~&D2=g-Gwiu4;9^4EYN*R)fM<3J16*Pz-aZ~=@Kc4qEqV6QREE-I>Ht)<( zI6|nwp^6{eae7Qqc-;V|pLI;(9+eGM$QNj?bf2bG7M(H@fsMZ-B6iFs`@G89<~FMG z{l>T@GlJtqv8Q4-lXb3Vg0Yn*?CT97&VY82>+Dv5W%|(cj3N%Q? 
zQlaKa#t*zxkcL1&m`t|FY(mlW0=WSi97R*PyFV|4P%#O8`ZBbcLWWIK72u70@5fca z_y(7+VbR5eMN2L@!&tE4@`_BY$^YDLMg=8_43S18vCtDwpGrLPp69WKq>=shsSB=b z6D^jNWST{o7E62db857tE=RT&ZObKA{-u9{X5TI8+2_H>8*v`A&oLEPMb`0(&iTO3 zZ0XwmZ;XCNkWYbd`;Sz((N{~2pV2!#iAKNuSjxCIQ+&KVk90>muy;Zic0Z5kwuk*= zr`PdBF#6UEw(-Z+WZDOL&K@ynvZq(*g(WRvyTRNx;e|Xvr#@V&*01=IZDb*v8W`Wt zqY8gFhgp#|+4MHG6JU0YQ3_&_SXzA*m-`|UV09vQj*A}`bWSApQEfJ?@t9t>iPL7K zX})izNQt@@r*XM#k?=UdF~C^|q{#HS*A{*)k=NbnoSZ9c9G6&%MPY2Y1>TKs4WWuu ze5DQ9N_EM-49B?H7f$(|KvQ=Eb}QazKIhrA6;8-(`-@)BFZ{MmxIILW{l@{E!P)O) zEubv`ZUYd$to{({ELx-_&vV{LLw^!_i|VMFxZPm)MD-3QKkVB(IY~t9cPLUSpW^H3 zR)o?%+J7Ypk8I2B#|n3r?_VOnPNG9FIG)I(yW+=qz2*ZY&CIRIoia#4bR;Z5C(RSu z_;yHBr~b<{8A=Z};Z+fw0a*i6%64dHu-REssCnoxYg9=$b+!lNX{(<3s^PU@AO~wM8p{SBdQt z0_m!!c zAWoc5(?I3(?j16;#o72WWPjEBInRX#Tj_|hBd+;2k^rAUEROY{2C-9sPbvO3bGK)j z(BXIk*NqnBZwFV&STE1=6`!u5_kDNu+C{yTgIKgpdbkx`Rvlx(NVICx=uaw;n+LR# zFL&+9M7@&PUZuJarEE;xw>$}nO2bB~`{d$t%d>S0!ggBnS%Y*|g3&=KJjU=pY7x68 zmcBp_hyMNHgwsKuqQmi=Uc2@oevf?b!ko^9ams+a((kH3+GiLF ziw917)5_6p8yE70BLFm5vB7cNNgWk9zZMXu`IsJgxX#o2+~=q;3U|JElr*OmcK2H= zPT|N_7(J)oGVK=kb#`VKQb4v-5~ZP0hnnSPJ8osuo;y-8>llZB3%1^F2oNrvVd`x6h&A-uV;02VJ950z&n`|7Y<8= z+w}=xybr9%u;HF8h#vFGfV3?h;M0A^S{4t`-eq8@-+JdaDX|aG&$2kFY*w#d*y?|Sb|t@qYdE7aX!D= zUPuJ*4d!gRzl={KLm6vBLj78JxLRh}f$*=D#M>|7Ktwn;oTqE9M~= zNR!FT<8nua!Ur9BXCzH3!~3~j@okIUCOj%BUn=0=VNfgAb~sxTz(*;5S%+m^ef2-< zpnX|~^6lN0K*%fLgU@50HG_Z?e%v?I$CO!t37Y?UoEA_>b6_@eh7GebqtGIv*uz8p z__&z(7U+R>?6uvRSO2pPhiH#Njlb(aM@8WPu;DQVJonQ|jeyI+yl@0C)>42xV{F=V zikHYi7fk(c%4c7BZ+Kt<#u5CiUjOBtO1bvOsT!A?W#|3pr-v}%LbF$P>&-VSlR17p zqr&Z(_?9Q&zs`r4ufSX>JVA9(0zogcwr=yRYXoM!-hTdN5}>*Xzh@m6FuAj}Tm6@@ zzyC(33^03i^sngbk-+RZ!F)@UX}8_N0;D8hQj26hJ>Xju{vE?0Lg;64oFO2V?|8nt zx4e@^F_&or2*NPI8+~C|&?neLO5CN*(Prumt2TCI(MSRjVlIatBAjQwJRn1ZeCZ^j zEzqMmz~s+w?9u-+`57+2+Iw^q?Z=?kFHBBPas2RP46sC0Os!_0uz>B`{fBG7_PFWo zOL!UU5umQxA_IKk9pO!6#I*i-rhf|B; zWHwP^8m0VPAlD+Wq(x1kPV=W_js32y&=W$+9nj%9&ld(kvC6n#w|oDN2-x>EF%Xl) 
zKoop<3c*{@8Jj*j-4HrZfh5rBbVbH&G(8XUeB0UQb1nSKdVUY6hM)QEos=R_l=c`I zUSqGMq9xn=`L1`*5h#Z1r>eo+7nIgG_!miYfg7@Rao;pKf-bxJ({NXZ$+ z1HQHP$}ZLq#urE$iE`*pS=(c~`33}0@(2LOH$Ai46$TUFpVTPYig&4-okf3{`Y)O%|XHuwN zOmH(K9e91_w2YZGec_SzK0*ZVkJW#9gfXY*_-~FLzzbY2yx<@-Ld^cQZPo`71MRON z`QjXl@R0+Q3V{p)v`^9)FGL}GV+`3R2Sk-|X49B76_Oj2Fr>&Y)X(dKJl_N`zVG9| zm0$r#K@GD_F$^$A6iPq_tI%Ik$ictIN-26Fh3E$VS3b2J3 zC^NF^Y&x)dVo_<%#c z@dLFh`R~UBcAkdW`dtQuA#8^)3t z7E3KhUMd#rrHOCgXx|Ooo05-_TjhbQ?z@cW$V)$!bDm+iJdcLA9pc=C02>JigO{M# z7nv}D*_T1D1mYdHPfDJ-B;897?&i1!7*zT|Nj#JCZ`_> z$b>Sxb#+m)Y#(rTk+ryp@>&jnYOZ>J#c$@#ZsMn$XK3#~_gXe=i8&5rh|TKyVRIj|=q^;M~(a z_yW$>Z{cxmR%TP{eBqRTh`27Ww$UA4>;l-TaT&Z3{6AiV`i}_Ko7n#-bS4K2lo`q+ zNGBIT+P(=YcxbRIPEJJJ1Vh~4`09&h%2st0KdiLuG5Q20HvFnXm+#1e**du5kB=RSPhk78 zXn@~^qJ!NiT`9|s?T##YJTibS#i(;TO%EGc%!+N^H=shIwVwN*H3$Ph@-6HINcenS z>A~dw1F}A(hTS)_Tu_%V9p-p_=X>O zgPcdt&*iLh%rTwO;MMz(@V}x2f}&%Vt^fC9;J-H7cgz3X$y;>b!50~o8{b7y03;Vi zDc7d^U-oc80QlbxL99Fw^YhE0Xb8vI4L5(01LL3v*(d3fv$V>Pz3*r0Vyqp$Dv;5H zHiFTR6V%a!5K+z_2f!2cBh7?wfsN=MMMGkQ1D>Pc?(EsKGB6}ez zzkk2ZIr7Uv!0W`pi8RjEmq)1{m!NV!Q%69DUQ&|7SabKMHn-DFXXHTu1bC?5ivUjo z_u02$zx)@XW@^HJzZgRKB3&igE-gIa0Rit}3a)$m-%Nlb?hEkKd;WE>C>b=cVtrV@ zK^6Camm}^sSi!lrT`d1D;0^25W*9o>Gq`(56OOB;ep7P(EfXu;g;2E9aWC>=lJVGz?U+=D` zKzrF}qF*hlEV0}kHp1lPz~RCSwjrAV4i$zN+s1v@<-7k=!~!l546*nJ04!hR0LEuP zpwq4e#c^-F1sK@{^u`Quxcw^~ebrtBm>$iN#|KfLL~vhho7&$T@d)9rTK|4x%&5wW zRNvkOHPH^pLi=Smq^Dm^v;VUDpRusOpEuNc7ViL^!i4fMhZxW)qU<(Q|Ai~qkH7&! 
zxxqJyv(B|(L;NZWX15`JDJGyJ~-->wn0sJQM>DGXGAv5$B z^?IfMwwJ%{MMU`EOP+*e0Ph4K_=RNVRDhbNAFbyU(@)GZ)9(N3W(phjc4h5_Wpgoqw4NU|&pa zfeb)#5P-nFdG+ss^nO|8Gn3>}PlzyZRzrjn4o&}~0N_h~q3RiVAJEiy^kx2LE_Np& zFKmSwTvjnXw1uWe7!zWJ9_9EKkS{$D{{r^8M<}N?OyvvTZWM}YX8;qnn}atT{tM{a zeE;GzEYZuZ*1g?Ea}5FHF$r|f{QqbUqHFOlPgXL4i~o1`1ik`>iJu4b z=bO+9sC3x>_7u=L0UhzhJ_=rZ9rB_bVu(#c9=ZV5B&z*UliW{iATSIWJX3%8%RR0w z!mlr$T>(My{P9!Fv?XxB{*M3ZG5~i`1b}ak=2^JBpPSAhq7iT6_&nVtzfnpgKDb;T zsBpPnP&QxBXswsi4Mn};k1?XjBKURIm+wdm}7c03i7Oe zH@`{X>;2~`;rVtGoy+sSrt*_i0t0b(D0Z)-0NzLW)u)G>NbUJ5lbk|@oCdH#+R;AS z*(l`C*m`I1KHyAL!tIG7T~(JV2K{D&CLv$5y*!x6lLE>IDNIW}QVphY*CM$u`rEIk zPI%s~2VaeFpY~Mv4XT^@Pl1LO03 z_;Zt2!b$iag&dJA;NC|u!lkkkwdXi|cK>8M1?Ro2gXFnD!{=EumG+!Q?}_G;+C3nk zHhUQnth(W1;O2ZiTfOV>XTj$FvWGV;jgs%3bbR9T?s|195V%LNrMtAsxJ$OPo??jB zgKf{`_GWi&5+SNfyG2Is6p{sN@lC!|_HgUD9*>n9H;qKbf&Oz$ih`%aTiP8Og^>+I&#pL#%;}@^Co`ou%!f4WA z&iBgZ#yfppVUFV67+lWY8J?_DoLjp&Jfve1ip~Al=oV z!$Hn|>;TuN4RqmlY{;{8)CnXL=QKBaYonF=WIlG+VROQy!Sx#cootNqR7|oi_F?r| z!=M@CSiF4#`1@_-rPtS?^)d!!*HVSk)dsOyM!l@w4QG*!EeWv1fbo^-6gcamAiR|F^`LG@1jTL>0#4>@iWcS&k&b!CvoUdq%|FTKVSZ1s9F z2kw-!CdSiM%QW#ly@jOBZ8jc^3BPz#o1cBC|q|KE(K2x%(Ca7ryJ#B zWtwRjjMe9x);~o|fNnhZ%Z3mY|3kp<(^WZiQofqOI0`W|0&XXIpf}tq!^-k2zN@}C zn%Ds}azo(mP-PA~s$jMa2&61BAJ29w0IIn24X5)iGBA+Cc!#3a0ToCUcZQOLe!e3g z9dr~Uf^K1|;Mv#NpUAW53_>jyFW0W$TWj~HJ(5A62_!q-zrlQDCt@{^o|Ub3KGi)K z%ksBtouKQs9Tp5k8VBmLfK_Nmb;b9uf4tPp0ifaq63;U6h)n+6o~teJ8L=&M+J96I z&JhlmOp@{t%$T8*vF*Kj)ETSpl{1%I_UbWpZo&!OAFN+-&$x}cHNPJ#POEWW;kuWy z-b!3X2!v#l>#61@p6kR0@}n(%+~ww~+>X{^+w&fv#|Nu*3to+gop^5b2n%q7mXT7i z?LYSp*`|%OLpw^1YkeAOm+JK{I95`doYos%Oykx}eG{h|VbH>rN27ZY;AF9a_IUB+ zrO9FFt6(z(Ygf3v0mUG9{_u=l<2dl8!TC)M8sM$Jxx#%YWuX>8kQY}>Z2Hf$OpKyOaG46f zjbb!lNt4i5pi<;4{TnBgqY8Ks#M|D1TVy%zn zQ@IKS+DnHwo^zDq0aZSy?~mO^mYQYX);gVH{*(I6G|*IYi@Se9zTnOKuCza0k`h5v zDw)d2%X&IJQr__4y-7KS#}Vzz@4=<4^CX3yLnUK1)2hM$Ibols2Unz5d;I9_D>?1wx@zSS%C=OaZb7Rm5AkPskL17u+#?w|{g?)!fC1`B|*jO+2e>4rDUMMw7CWwE#W=Xliq)973M 
z_;~E2vXSC!xK_^k`YDuHkhKlpMaC){(9E6x8VXE1+aIzwmjolCoSr#c2Tsusj+x;Wkeqg6DF=zM&HSr}SE9&`1;VEQqQ-<5xj) z&BQbi5dqn~2y zLRn4}%<1%V*C7-smVZ3%qt8kIZ8PL{1(_K4c-&Ln!=72t-*-U+vy~---qk)B^tj^< zKL<~1_|li)Y&}MR(JLYd6QaiR&ZUP$zah#0Q*Q?~Xx1=Fqr(lVk>B;wB!>w7j%(rd zZ$%n%H65U`xqQsdERr7Er>j2ZDImn4v7(3GDnaqZMYdZ=buHg8HNGQPxSYIiXDPFx z_axz)cG?5Q)BQXP^N~?l5C29*jShVfnI=_g=7UBxh6g!qut%+lJJRvQUGP^g#__!Ho{K1YaNy=8MydJ!{#Z#QsoX<} zN|B`G%bUQi&cja3tmU<`c)#|*16x_EBm@M#w4{ij@>34^9JS)237=DFuGvSHR1j=x zZh9z|+qno~=fV23(fPd1tXZrLlIS;uDnNqB;(2S{8m&^M9?N7pD!~BtIse-lIuVr4%(qsi&!egAF+fh085XjW3ei?~-7H`&clOy!KE8{*F(IfXo!T)|7oc)|t{ntg z>P98j(KrfZ3>x)6`k~mVUGm^qq;PvcWmcqig3g*}`Ffazq>L%WB0R=KtgVzye5=GgKbYSdGcGBUjVvd+)9r zf^q)i544%uFu9|isg2;_Ovy&>^8yyf>UF%?j7k0ZEb zvJeKXL2wLX55C!W!ECx*zKL`n-m2+X8JGyqifMV-{6rm2N3dhIjI*+_45Eq7@uzFb z&eDHSCh(ub8#A>HO^bGZg_)%ExynWE&4Ll>h?DUAv;4z%!OtpLIcrI!jr%w$x?*sD)?cT$ zef~S2f?^8!U-MV-E1DE z^G`hs-ki2%vdNM6zH$s~4jla%GcbmLFC7@Fr0nLFJiGQnU1T$Wd8>1+r26!Ugl%ra zn;N9U@tVp&;U%8`T_QEUB*t1)A*VBcJI%vaw==ULTE{dAn9Z?qyt?gd;b7uK@}mh{p-vaDQNP2ans2;Dx%vz% zZySYwA1&O$akp&cjcr?9xclMrKI-~}598j!Arv~KcMy?Fkk0)`UH-nTn-cyzFnaQu zXC2t8`;9th!s+79xFYKnx1EO%tL3w}>vt&IC*zlDeKKPBtTcw{I&=LiQDHvXZYW%e z=hEoje05pZ@N+gX3=FYA!CXs7pg4hSbB`()9*%UrRlkxPZoWV$w%__eWWLKHe$t*6`qp1Npq=079 z!pZ63d#e*B^C8yzhA{$q9BL$RdryTer+>6FRer7=GdgOk!z7+}%H z0Nb&LJ@Oapm5PYelilHl5NKtM9FgDKac6rD55opqVlW*4eY|I{R^Ye0}iXWi=iisbe z0$y0#FkgHRI2Fn7U==O_5B|_$M-O^Ov-_+}hQjw{tIFoM^qc=dXq*2V`(a>1Rs2N| zmz(K{-cj!S+nE_U;!opjI*PX9*BYy{i_zDS76grQLQ-g;4@Mu=* zb&ELNCH`o*jaH3y$0rAop0YQ*V;*D0S;AvPB^BrkZkGJGCi`hb8j}9J^|}1q?1cz1 zxNpQKmBS{t@V6JHuUxCCpPAYyj>2U@ld36eeK99QkW@P^F8QYMclj~xX_HTaqG(7? 
zm!&2_hhs$wCYWJ^0_32cw(ZCksMCSAkx5K+ei_4%!4w_-88;~F2S}7Uz6%|_e6Q{1 zyB7@2Nym7qza&izQTb&^Z(Sc5(o8)akH(aH=FtK6`Uz`SXy&t*GEAgx&pjpL5{V-;27usMuNu=lfu<~a2 zo^Zk3*C}=L%Rc-se@4_kMf^6eEAsL~wgiH9ZzO;DWp4+)M&(ZT4od8^|2m0Nu56g9 z_mmCmCQtGaGIEdf*uM;XpaxyX3VQDER$E@WTWF68@LqHqPCLY_06u_n@TZR@p) zTMWY8r@HS9WzN&+9o}azSQ=jRpU)2TOjHJ@v_!gP=l;zMlQwMBj(NFdP3tM=E$BA3 zd%;>b7RlrBP%M+I{Ru()aWlAN@@H~9(d49r;d#M9S?{^&mksU9<~h{2xn@HAWrG)c z(~*aW&wV0lbY&vlycB-<%cM3|m)1gkHqJYt@#NJuV;@4NP@}7LDWP+8=^p z0s#TrDD!)(3ybBPWh)KfB2-AHQ=sVcT=Ggp9&NiE9Bw+6vwhCzDS>Y`o_+RDrB>$c zMKRQvP30vlP{RGXM^t4X<)Cge=}xyv!esa#IU?=Ra!L+Ym2D}Lrn~33o&mP(mqME=GGG=^}enK4tim-`ic1Fj{}M>?PASz&Suy zTO3F{2S(gAR+*7uN1v=iFP7`Uzd$jlL})PIOT2GQ{+m;Pb9-$Nl>htA(|BycmyP!n zyB2WK_#wLMw&AlNzY&O9yR@|v)8b;ouWkP^81`ixZ#FOX002F*Yucr9h?KMa9=G6H z+1h}ab~YcV8oU_-fcxzbIml{TV3BUXsuty!$8}>0M&vpfXq#Olj$Fok5KP#ndT;}) zAphY*HUYVP4Lp?@WvcKQdY?q?N~2xo8l2g_Gt&QtVw*Pqy*3*WsN9Z=LAq>GE1~&3 zA_CUbnFzps&1k6^k>p}3$s;!VY$O|i-6G{Z48&YNw#ROJ@lpfpGlqdRb(f1dUF|}g zibiC>nz{m>cK)luSPdSi1|U4UBJu!ej@;NYsFQ7*k{R(#zmn?bDW5U!?; zr>Do`v`Jo0#G!@{%w=|3X3_)h(HO|vh1{)98G)z%?z0VcgS?PX)c&1w$$SCRKb{jf z`m7lt7dRx5*@2vlh1?E{5z#u2Q6{bqV&fnh*T0Xm`m*d{gsg!>O%Ah$+}{b?g~&mn zSpbY!PAwm}sNyHa?hq4Ej9h(fhf6<8)rsIyL0t1R?R%?wdPt@7z;Gok-n;*>;8A0@ z{!r()WjYrt=Hw+`Ms@|rGU*5~O7wLoz(hXlg^>9#WVZzMBbU>?H~o zA2R@s(ob@%vny5|R6|pH(O}Be$oX>RmR15vG~ac`+d?|_SjDb+|$!W7IDBLqZf*4dFofKVlgs=cO|I3r_-@*d$z%qk4 z%}#^unp6lv*eY~3*XQqC#pqil75@h_5Hn2ye6cLMjwdm|F)j=VzXbhDw#2U}61g>C zPG1ZhP)d>$9J7B0hwBeUvklP1!|M6Hxul%lD{Op5h{)NTDU>@pYZUINa(c-th|HM% zs@j9y@$N-V*L2x1H|ty|DEGWqL{j%e>k#C;;IWfdF%CnM$?tcr(26$)FGL%!wmzwQ zsJVxD(xf%l?*I%@HVHme0tYTf{ET6RwMWZ!6|e(xAMXr4441eIug7(tyqb%Pp3oB@ zaNEZ5?+6B>J~F?{pZ2t`<9}H#UhziO>-6PKpfYKf=*JeD>CjK%a-m|iS)QG;$GNJv zoFkz;?A{_pZEg^LTy1uW7xNB5!#p@*MM-qxpxPP6b`PTbbv3}u+2(}!*8Oiby?LkT zB6^!wV$;GN0lQ8F3JgZbcSrfH z!wD=iply`%x<9SvPSD6#`f!&q4IUx(MXNFTZ!ET{@*fvhEu_vb-Au83-TA|F2;qc> zHC$nf90Si4V&9{1zR`wY?%UKqMYBe~uSjvJ(k*f%MVXwA>m(pqt3|zZ3hCcG$_-BV 
zU?p8XkHmc+6D2}(mUi6lz7!!Ntybq;flF!}YEl8XgY@~mrEcFmvOCo6geZFL?!thf zMp1Bds$y#l8(f59_Q3eT<}K`-%6HZl!)_vO}L8%HQcn!_rZSt{N-1@qdif}{5 zEl>PF`64>0G~z8?Co+lVQmdb!^vkVzR<6F;Xmyqzogt>GxSrgPr_%BS9_;7q^i1#;2(!UG-D?Dtak%oK9$tFA><@!H~z02U6{%=@*>GZ1YoE=;KM8)*1 ze+7(_jz@k06HE-ljKQZ9Z_d^Sj`AaJEtjU@o^@Btd@4bARnXitg19ep&Z>(3JrG|I zs{?0YUwZCJ?r_)9eGyLCWWV}Flf^bsVKhN;Gv1ff;;ap2uO)JcX=-S1W~`7j8R~p4 zFDU)V$cY?SL~fNEVnJFD$}04nZ7GHvkCPB7V$P@1D{5i($@+@!kBYdE?UwU7px!`} zh;L*PD%Hcab~nf@SsrQ_t=CCsLTmgbAT5s`DfHJmX?D>%E7cM*mvY6W5>L|evMMfE z!whExuDU{R!Z!M#?C6+&U)f?^ALTb95-F!{pvOtk^3A_a0H>!8uv(#(*y2RTK>`v2@Oy|x8) zXoHW8lQrhkqe-{*I2BM#vrx@l3$bX_1s3suV6P=Y-j*|jbby8W&D1+~!@36>S6(t? z%t{>TC?~`zU zEjVn(Lny-C!m6-;Lnu1?0%TjX^WGC&9x?AG2mqu0B*Rb zKN&iUhb;*>?EM8S+o-McE49pC0-FKreApk*MgZu9&F!Q|30NNEak*b+9g{AHyKeIk z`T5>2nYr5BkU^+?<=mI&50pFwv=`srdJ5>O1Ut#-n=UPjS32q@+%p2&Pxh4mr#vzzRYrR(2az;9!$8~~!^d<^ z-Bj2ld^*qq(f1aDfc0&HSrP^63jd)137<>;`F5Uu(_R)OrYezx9FqqyYgT?x$lF>6 z+E;}hCs@MI>aob}lIVTXwyT8M5@9lWe@>i3c)Sng+uR)jm#Z)=;Liq^A&^RX)d>xF z1j@SMfRlM11D}}rN<(>eAC1aORYf4qgw;aDR5%88WIg1(L>KA=L0Tg~XC2@rblFJ> zIe^UAB6{7@2y40W2JpTl#?Ya{|J2rsz*H}iGLkuie;jW*_m?Q^ytX`kU5{?!0)I_c zayei%$MBKo^&k3e0Sw=XY>y^HkCV0fEhRXUY0YWBSu9jfa-m_skdIL5!+yBoMJy>=umglAI*z%QVgY5Ei<2; z0y2+)p<}|7`0k)42H~UrS{Ao_a6QB22 zk2?p{>8cJYirr-g{w_^Xy+V?;k*#H@B@8RmV(93n-**&zNS}V!>$JKCd!;y=Ggmkr z^+==lwZj;}Pv@Qkk7!&LcE_=X{OR~CNw0tSQ4olKUf|xZ=5qxk&xeRWWrS5d5Yl%4 z9k{;{bIl`9*MEU{lx1oDj{h|=ro!0g|AHTgX#QhH&=kSArcfXgOeKfwC{yrX=*IBT z{M8B>%fK?kXK!=%!q6!&q8-tsss#1kyab@tDWkx+!0ojcA`{GzXXC*g0I1XLK+xfQ zGqqT{hP0263D-Q^>ps7lTxKpOt@X)y%g%~YIP!F{^@cNn+yPQ1Q%_3xe$MsUaVbmf z;A&@L#51RI4!~K*X`kFGG(?InH(t@ES|}$lq54bhJdnf)U<6=i`J;U9*&G_f)2%FH z&Kza#1l5u^toZu6w^!xgJ+(#ymf7CT{l6T=wr9Xi*TQ2cC872VglQTm>g*lL9L}!M z{74yjsfKhLTy6ENqjQTQiPudQ)1CE<3^&WkB2sxiQs?)&3?^5r zAFBnXd`bf(_b;rLSs`$yw1YmjtrVD6YUzA57H`_7+ey*dzc%y^;5GfuH!GbFR$W@Y zD@QZ?CZ+>}HG@E;RMFcBW99KNhcyv$>#Sdm4L>%`?Pqb5im|gtrDy6L9 z_2qNV+35{~qN5}UHoz6#Iw~USd68dTgjG;oN~$6g3~Y*o{tPL2?*jn|UqcDPM&E1B 
z@~0e$lTnU`h=xx1LiDoT<{|yTrMlMQ(t6PzAR=>mk+*(=Mw**%HrwC66G9%Id$ zqr8Z*9VK|J_YX|jF&zfiW-f%m`akT>e!bp~{Q0ywR&VP|BhOF{hFE&;CU3OOO?(*1 zy6ScIa{V;NJNpnnrq?P|(L|o+R(&Fmb=Qq19z8s|g|;e#@CiNbVf2dzqg(aLy2)PU z9~j)4)w+$$1B&s)d_SE{Og+u>hnmM7p6LCsRl+=_w@Em}AhXECl?y~hY*(*AB*HkP z#&Ev~U_ zh;97m+0&aL5yr#IF19{G-}se3TqT{Z8TjQH7Jff#K1>a+a=oUc%*vg&4*NyBJ@z>~ z&G=7xo%RG2I}YBhUwF4DkXE}6noZkWqo>b2jf*k8ZXtQ{cK)`e2)^PKp1CtkC67)5 zM@|m2b4})1NXn!Rtwkd@RQp&(hNO*c0;|NZj^Wxt9MIhjQstty@g z&zVw0pM%1|-{1hPWx=NF$%qR7Q^XH53VnRy5Qq6~blT55R>D~ZQ&w^<^WyCx zHi4wgQNsxqS#yXucYpNO06@^*5^Z_y(6Fry>wU#MKqB9tAm;6IybR*wh?gl6s1J(+ zTE^yRT=wx0CgtXZzbArT1&aA4jRw82TU^w=M^=ILyEk4&{15_Dkxozz#C1 zQzV~Et4?-;^;(>Zy=1Pv*3KFN@3PX)*iTOQ*K<1` zqTHqQH3(QjST|JjUM9frEV;~b=_7A8BkE$IQ+}?SsHXfQakgMJ>>bzvIm>m!rGUSoWHrSYPIv9hH`*@ z%2H)CM%lSQ{8JNxyA;kQwiDEVHp@=S4G(O~9rLDGv=KOw)>mZ}nJ8KAa)lkSDI3s* zaDnmSj6cbLTK0IOd*ld?L15^jQ*JODPdlo3d4tT_TUW{!f|;PQzPO8xR5N)Fjq%C} z7#cNb()C?HzeHhZGUyF`?uEaHNh{+MG2qQ`5N$Uffa4%9l3X(AQF0Qc zm;b{_Ay3iiFattBhaTC{HKTrHG*cn$Ngn)aw|uhNtlH-F4_4cTAB}S2Vo=L8Rgmj2 z;JZA_m%YL4dQ>~!3m$w%k(0+1UQ+4|gQ1{Ialj#?rDr{pJfv?maSc|?P zG@`6qrwO}uu7eZ6iYS}U7%62=knUd*i8AUFg|&sZ(O zi}$5h&g5y*%;l{Qrq}qRuyrpA-b1$4JS%IRQp|9p!CHffa+k=HsJ(EuDOWbKTqK;D zGP%{NlSVQnZ=&<&bTChQ&mOLv&Kb2KNA{XJ7y7Ql-3EMpS989UVC&Lg^kUv%dtX{( zBAPLCHS!H3zjyZ1jSvmJ?YL!RI?GF`*j^Zev+lu5W04)29WEE99(HcDQJdrE~LC8DCBpK7$&hkEzPN z+!7sV)sJ&qSbhP~vve(DN+59r2eN&(Osd1j zeby6ZRYoB;)~aV zMxXVHAq}XVaQ+5(e`%NMZ8r5D#eAorXr08{E5geT4ZJ}XR)P7BI;;LF)J5O~TT{#c zNeSogue9on^hqiEP)MDsvtcz|>?tT00}lH#kqPg-K}j~@iI$m(Y4wkR?;acFuQ-p( z%}e_2MNA5OB-WL-X!yNvT5m9eT7Y`I?mA*A8mA}aB)dl})n&Bq6AUCJ1kO(=t{*xr zb%mt3Stx_1zGKiZXEx3A5E!6+}(%h#YJp;_#pbhm3|26c-2_ zPTd*9fIx_1nSU%(#Eo+e+RW%hO&JYWD_5UDZ56yL+vbItrw#HaTUF!t9{OEU{vWejDc$v(gqN5*P~Umt(Ts5DJt_H0 z4rnGRClIr4b6`OOGan`V04X7Mu{khsA5Xm=*M`&WT>A)M4~iu4dfkh_?2)ilF#&I1 z^{z_AcPA3ZpLO=r*^89AiBU+0De&1@XNjjwIXM_iM$Q&H&XP_l^gnZg1<`&l4t;H?(=xb7h8*ypz+E;@9NNcvw+T0*%LRp2iNd#(Uq<)40O z`S%_jSws(%T~ad~{CBoR*O0dVafnp!{%(XQ-pr;5lKfwVrpzPwEYK}TUtny 
zP+nQFVD@@wg+|g<;=^6DH#0{l*?#{Z(HyP@_~}7{O#N7`;j*+~o3BD8i!)8DIy0{EuxOAzQ^PRQNzi}X&0n6Jk@-2O2SsP08ET8iM#Kt5tD zh4uTS;SE2vwy~Ip`XWKlJoq>?#kXAs4vkki23{Gu{HoQ|9sm!7*o}xstUJn8{d{tq zwwNbs-gL|Vudx2jRFjP*pdWt$Z^sX7?j=W?b5B7|RaTb0><-468Q(2_*Od7ac&|hl zd(CP%5#>jlwMl)W8((WCD_t9=1^(XIDUzwxmbHltha_Fd{qN+ADI=AVo&5{e#$T@&CG8c4h|0fs)ZaanA>~$^h`C0yg(htuyYKMxfEHy1(4+{h=@OHhG z%T+YaOfW-JLpDg_mm!9!yK@rYP+en!4B*pZqA^fx=P19coBrqV<8e|n56OIhX-1uyoUqRo5 zU2*{CFP--O0VUio(za~;`>3-31mGN(i%0~=9a*+<|9U_#03j=n7eoFtfD|GBTO~uV z?^()IaCz@i=u-%IWPIE;0Hz*aRwmwD{&SCDXe|K+zkSlt>FDsVgCIjazJJ~)JPWA8 zvSapH_RkpX){6}6GX9j$)S&0*Hn`}_v9>21Z<}(PJ3(hjjh>{$m9H;Ck4?IE) zDRi=5LvqPOeM0KNo)P#AdL)W*(vLK_(spY!yrlTA9)QZ=K;O`z|83zEd~pwcVSaGW zVm+S-?h=KR$9Z7S5QOL-k#`UZ8Exylvd;F&M*=g$FN&=lEMc-zF86-c8Q~SfnQ&v~ z;mKl|pj>8I+0-zglE8qblz4PUyBF6%>NmQ93YO!0>O&Nja#uHjU?N}adz(QDg8$rC zi<1>@hIWJejK?e}$lm+ZD-L;!xBy+xryM-y)mRdSx)dkJcl1e7fYG0`;L|sd9?uP9 z2F5-f%wniV+`=GSiBEvLy1Q9xOs);+BK|nB#xv1X8O`pc3s6oX}UYxFD*YN4S)XumP z4WzNupQO7@;Istt_-GV)Cx+|rY*_iMBI-HrBUK^s3khlL_%AX4>4Zz8#bSN&>QwRX z_4S|7Sy@ZC2btq*ailkhqF~TC_9@GE`e$SyAni@~X+lzZ6(4d6(VG=-q$XxF59+t9 z9d0-*iLRk~l|TNk@qXiSB|E$2011GjPE zEsnmH0fI1 z{5(5}9x-l}d>_AucxCdUr`K5>80MdN7k0$&!ohyCq7Zo-XHC76begbY%q!LQQu3ZC z*Ka$lnEAcUpxg2CimSn8U&2>-uY^}sx?g|pnMGODnlFGV>eH5`-g;Y&qe=Tmwpg(; z@h0{s45!m+-l=A%j_^?Y+>;ZO#q5NhObpkEFKt#97l9WJJ!O;M1pD;M`n~5(XGe!T z)!Z*Wxu4dnsvU1y(XBFrki7Xmj&AKfb$#^kAif!XnRUM%9Uu2xOxd{QyK50tSv*eS zGuvjhBwj|7U>0l7B;Yu*!FjL_K7uld(qc`Rtqz(iIjE@;>`spk*Fh8RM+#{*@q-tI zmWS&HK*-{mz4pw$v2#cDY|bB&=iJ&nSy}i<9-0>-7Op3>B~QQh7NR?!#tVfmGLX5p zq6XbAFcl`otAsIM(qr0n%I>1k$VcGxP9s%hk{;})R#gOO06O25W-KI(!)~sa85QVk zvAQ<|a&9=sx^Pz9-qwS&M@7d8UOPX{fQrzp0baV;I}yT?e9YnbJJ;OCD_}gKrmZR|=>^U=A4$D0rq+Ea~OvdB=-LzFKGW zm<*vzN`SIS#mP|5O$CAy*!f9E9e6-umR^ZK^D2xZFy)#DHS$$iw zT|Z1IEyp8nYNM>+%1V423vzkpT^d(F^FaikJvU#yvWNr)AsRjJj*o&Ym+OiVwiBpS zqz;Oqw~WT@j|3=p2E&BvAc|75$=e06mY3T+oUri_^M-2w$rnQ~kNBSjRJM(F_K zO)Y+D!sObW?dnx6yaAA_OqVnYD1j#PY}=gZGK!1pmk6OIVMp3{lHyasTkm;#?8b8% 
zq8dgo?1(Z@s&%u&<`^%xU{TPCXts(JzN$_bsDyPW6U#%=rQ9%uj&Z`eK#FK_{@ujB zkx;&X=xUO+d_TeD^7GH~&oxhVNK8&H5*97xg76sSOnCAk>%9o_fDoV!U-9qs%`Vu1 zf#IafQm=sPl)Ov53!LWQyDy$?O>uMNS1?k-@8mRf1;XyHh*4gIHDupplfjUM|A^Bp z5U^vPBAx<7q@==sW)l2uRC|H0+H2w<^i8FztD#bpc6t}q<=K(4!L=FoK@YK*fq3)J zk*U4#ieD=6zlD3Cot2ll0W?Jorf=>ufLy7@ZlW3i3f(w*#A%SlR}NyY0pJ`fY;>yN z+!*#q!eRbjeSO8_Mez^h7pKRX@JZuvR%VJDfR{GmJwS5xZz-L#95pfU4?K-Gdp8v~ zIHwi2p!=s%>K3#bx4I1fqE1(~?56i7!}Jo8Q+2DwvWl31r#h^evS3!X)3kB1KX0?5 ze*V+Dt0cDZn}1-&nxkwAMS683k%#vpcAYSSoo0jBdV0dcmJ5%8??eU{Y1{|izY9K= zno8wzFQ1MWy9ma2h(}K|qy#@&tpS?@Z-H*RhV#Ez1PGiTV^Alx47~dstS3M_YkyPY zQsX;bdS0C9O0=aL(fbk8a|$*6!QrB6S_k5!*D}5R6TDJUTl!RChL~QM;A*+1rgkoC zbEcIPSjm1CUqc@bks#ws}8I22z1|!7$ z{Ce&(Vi+<_2mCDkqnRdRg=4AFrjf>vBrn|hUh-kITXDan##D9InL(GNwXi4#t7n1! zkX3er{n)il{MZEq$p;CTV2n*r{P(-@HwnP!dZ&o=z#1IAj6o}Y8y~Fofn6W1p{^~^ zt&@2kKdZ`nTg>~g?#TYLYWz}yXF4bK^nIJUakpC+KmP^ojmJ{d^i45qa~db*`ZZ7f zRc|%E$H{H;cPlbIwhgg!YtJx;cCU^cBqZM%7Rn%~R7{wx$7iPJou6ffxaYp0|!D z`9m|R%5`s~#`{t)FOM{N-vo!ACgDjIox&svzY|yAkS6tcnJOh71GQS?50}8V`H>Iq z(q_VUou6337F(+ROtt( zoS)WCBm%^i5lM=4<2@H3`cK#Q$;7JFc=#`q2pM5f;R(*fbreyUv_!2dT* zFVP1k27A58{P9N7oMI-NbP<@eR4WZOPOA&0YV;njSA%LoxzwSz*$BZXp1QlsMXk#ED4v@0Ig5`4{q1fKY=g^nk%X7c8LHH zSAAt_`Q>5558M?11!FzIvGb+*FejqtxSHx$_4O$8#8412pLqf{ASkpjq88SAx?i!| zs{zy1I_i3zv0ALP)HlBYdft#HB|rcuL&$yJv{QC%X|mqtQQ%j41}GPUuT-Bb;WrK! zDl?bkffyobfCDpnR@w0^WjSAFakkRw*O}(JY_WfTIF-9s(RyBU{|1=t0WqInR-^#e z8)zRPp_e3DEz^W5Jwn%14Vb8egA6ZO&-+sg(=AP{YQqpbuhZ%Ry@8rmUAP5Ff9RU9 zuIIax@^GmnI-7EFEf0W-2s5FTw_I-prpWX*#1Q4XFAesBKF63+1g@F!cpB&YIS>@A zJ`uq@!K3t_;Mj>UqqZqYRQm2@fjzT!F~$rl1D??5Hg;(oxRN7)-IE2M9?-oT4^jbx zZlY$#y_);>xBHdpItwK$F8~J*G;5Xu*oJniW-U|^d%zQqxdy>jOcK3i(DPV=fVd;4**oe{Gxy4A+HTRWaJ!pANX9D522Z=JNZ19Qq z&KKH~LST-kOl zwU`ZnCGo(2O?qZ_d0bT#pq^?1Sa|&ptNF6n8WXVQimmFZF4X1INo6PBDf-!RJzCpB zQJvKS-QWY_;}jgBw?5yE^3nv~%V=BTY=h?V+iAzkdE7y!pY9o35?~J8QfIxyL}Lf2 zHm92$cB**VthO{tJH-PTPi_6=#8$3!BcMON@p-$e>$uzYCY@clx@UHIh48+;-c0O! 
zD0iI4Q6XQB+bDspALXx@RweX&xwCMX3DI+)4h{tD;TZJXf}LJF7^Y^kxN$__A7t2 z@&MB|4r$r7o7hlntEp~bQ%{YDZD0c|fre;nV5u%XRYU#kX4CCy$9XX4-Fgpv*fa)Y zW8}j&jcb^yE&#)7%0`=9+gUp++e5 zAFOuTMhAY3(x~4`aO~wB!=G3ycw|-eb=7lf=f}33 zOQ+skSS9X-(>u9p8-v4@s>j^oZr0k82VU zB8@+;6t_!MJB>)BR}8rKHSe9a;x)S2uG#J#P^ArOKD>+s4rh+Ms+py%4QZ8SIVWR3 zbN^M^cz-&v^Qaa)w7a=`X-KHG%(zgVWxOcQAT^$e!C82Rxhn;;uGzZM6f!N)Yka@p z;ZU@Z8eD*rs0fVpBdiAD=`3BXV5ZDkR3-Qv;6m-zeY%S2g?wqesE^JqZ@06;KDm77 z-y3@g1=F2zYURE~Ia)+aGWRi+G@vSLS?M^pf2;HAsgc7Rp_pjj8p=N8%-{VtE|aGo zG9CSLmU4^5{q<|-3wSW7aOhf&`Q2A_x9soqb;!Bn=~Xu4w!+v8xSRK7EPCN=PLoPI z^^qIt?YsN?^~4rw&E_2t=XInv92*oy%3ZXVrcnM@+r>#*&1($ zP}%(ftyGBvNM0k1#(N)zBYd2vL#KV$-!=Khw`NLZKEs+%?jv73swap_pS@vjz(X2# z5*e>sort0L1Z0_QyuDNl&lxNECQ0{>kv24!VNVgeTWo7(fJaExK>l@K-=y71Rk}F) zbvxm=Oj=*()1l(Dl{Vf1Y?ayox#3E!2KBJ}mge8iC0O5LRJeUv>jYg0;NN)tlo#j} z4|=F98{1ePs6Fg^M4Vcarv{#aGOO>8sF7up_2bjcOx9+KvYqYZ^`h&Kn)Ye+h#UE^ zzQy%HvQlMGSFT;0MwIjfg&~7Y9`54b!jMulu&{R$FZid$&&F>Z;}}g{`=PQQjFp{R z8O8j-7_7TxsQqL2K?l^ee2HHP$G>n-%&sGki0*;I_C!}6J$l88xi=N89@vFprDNMX z%Fjep*>Jh|5=u5c{PE(_g#^Et8~Uqa!3&l7L*n|NAR`XNQN*0CG|JWYYj*(Ed>XKo zsN(tZXkaf#_@y2Jx!Uh|ws_y4Di}G__qq57%a_ujMGj+Zf}GI)2rR8u21@UpD0a;i zS?VG}sNdMwA^HeVNuE8M*IpOxSmbOo4}+R96zFxxRKi3MUrp+av5p0ei=iD4!%)9& zx>x!CItAn{E_jqKT;e?0MA6+{<>ye1>z<_o$)6bmQF}P&&e-IzKte3lfbra5w}A?M z->;GT7W*zb^tPLEZWF*i?zpOIuXciu8YPSbV2HPSJxJ-Ek@WjhxfAuHf{ueOP`;Mw z;=dr*p-ivw02nJc`ZyymslR>X z-8bvrh%lpBr8Lynx$|lOcREKDK}Dp73cZesN!73(X3w55^zx+jqJtS(sYl$Y_i>O8 z-5E8!xJLarhvt_d>=*2;i-!4=*2x>hY}}rYbNLFK!1O7;mp2jALI?RY6fAP>1Z%H zzeUl$g0|bmeFRWknT&)VWa^PGG*JF7$0=!i{ve9(7Z-y8p;r+a+V0?g9_)FjY&kAz zx+n#7noj5i+q%xrM}^{PxpJ9&UvnCa;b9ycSB2NTZ^o)z+is_&_g^@e91u&8y>Erw zGo4XCpO{qHc8ZhAbrR}xb1TWh!=#IV58*-rmn>xFML&`K!TE#^G7Z)AB&>CNvGus= z-ZJ=9&P-QAGzRP9`r3{jp~@5-*g5(lSa6T<(Y1T}4@+WL%4`IlB9oGP-ZqwyUxUcc zBFF%1&AEQ|XnOY92P%8ZidQHIM+cEuEu#mAuvO$F23S`^C$@6EZm)3rzfr&LnWz9c7FPk=WWp@ZFO|4sP8oi} zti=n=J9b4SjT&D;6?)eZuUsGEKjce(ovu`?&C_WTr>d2ysHB4TN!>G;@bCM-yc 
zgu3l?KYn&(JjO2VX@{cJ?(9Z(GIB5q=&FVU+C6;xf$OJkqN=y`FCyo+OOmFVZx*@6Vn3&EeHd=(r!J2nuKIx9AuUaua7zMM zvIfkipm6!iLA#_O;c_l5pL7nlf9PDix7BU+!dg=yfrz%P8t3P3oek{Ima+&ig);8a zV*Fp9$08DW86Ap`6+~wk zpQ%wNEp%u;N8=T-;U^8FfNa@h(QS5r!d3W87c*N!WaCVwxz6fqH8EL~F zCH`aGO>sRNb-9zv5$eVtz?LXq_n{#sj2`sh#@4KL6pa`{67IcT`b%g$D~?0RR{UID zrgSHSGxvSh?wPNi@5rw$e=`3IA5u3%q*c%H&u zRh_u%Z?;XC?ghXG2urD|afqR-A4|_#h0w}=oyu6Nn}{rD%HUG_y9moHX;<;9JJ?;Q zyE_OD428_2uab*ER`&mK^%hW3gAo0At~KmLx;44fOL0* zbV^DM4bt5W|KZ;I{k7IvvzRr*ym9u~?~do$Th0ba+4h2-{_ZCmxWO=Xnn4r_Xa6cF z6S|Bm-xvoq#E64uJK0NEr3RI*3T$VRK4Y^eI2Q`&(L)Ek-m^!+>a2mVO)qF{5#6QayrAuDQN>4eZ2v6Mhjx1}Ud z__E`eApQD#T4PsDf@)T@Sdq{0^R)Q2D=4_`M{A3wk%ah(l+poO+kiFBeeC_7VoXhM zpx|uj!|7s3>@MJDarL@2W>IyKjG8|G6!Fbp1DL7!YBqhI7(KQiKeXB5WL}^rOaPuSH1(Wds_1h=cMA%&wSn~+2|ta zVHURUMg8vneOFBT0Ywhm{w||%bzP#Q6q8nT7n)~Fmslb7N>V>ZOmq%=BIYtEM*T#{ zND<@!3-$xui0wrD>d-ATIqVWnjLCa!BXz$zQ}FI>30J+(nGTC#0G*Cyf>Lq8+=5hH ztLj~?UaRknl@)GVoDq6JrLDWWe{xHgSQw&5GN z{Q0$x=lV~}ZiXp1VKf&LPO0y=i41mEXz9|O;;l&LneYX^V0^}~o0!Fg0~;HDe%O`5DdiuN&N$3PhuHI)QueKj zdkLS){?$p$z%&~^`g`0|U0V*XlRx?=osSwwrp4(@@>xz-ok(^*?;Z71&64CH^lm%9 zv(ChF$5$$UU>hr7-hh zs>$^2h6Rf*kA{qq`>uy+WKPK_1qZCir!pBt4TD18f$<;qe^U`p&~PHZV$+zH730A8 zST%LN?Cn-ts+(ZXQuMhY?eCF&L_5E+_n!vF$U=$=ty{enQVXnBU1DVoB4t)wIgkw| zf5+7QW~JxnqW9*5@-a)y{TWVTG>E!b!3NxveP*CX$T4jug7BX^Ng>ZQ8%EMGfBtuZ z;QkD^*2L{vG2gIX>zcmvc3uLk*&C`FYK=O2Kco|p<9tiC2B36a0WrQPI!3?24eghOe})t#JZ>BTw;B774v z{~T2}^Z1OmsQmh3o&8S`b78o6llVB~58u)|o8yiJKA{;I;Cy_3+%_P-|I6y``;bt% zg+auj#au*yUXN2hHFxvS7V~8Qffxq57xHdVssgLD%AtvJO8(+&;EeS|zMP4Gl$tjv zfsAW)qlkN4jxZmWa_*W4SHfK z1CMA-3UTpv!L`q(QB`?JeOk;a9O6H3YUO`g2*`LOCbAw4R7ix41J( zFu-Jtb&-6KhfEjtGLkDP5bQHsgST}n^FiF$H`$i(@@FThPK99)MWyA>G zSb$;VRo@}BFyYHlVtz#~Yc zpn9PDVpdW3v9^hHJ0t~e5Kt&6>wn;3!X9d3a@Y5O6=vy~p?OKqcem>JA^Ydl?*hle z(Iz5j2obKTVu`3*g4cG2>x&mXiqbnE-*d!BOnFqX(@Sr2FI68hH3 z7KVy+YP-)_1`q`zikZSboZ4nXilB148crC`;z5$$9uBB%R0Ds%KON<5n=I|N0(#H5 z3$o0AxZTAqwB!G6mN`ughv~E`I$WnXy1)J@?)#9d4zegclwt3*bz`hl 
z7z~L_yT1)>YQ+S1<2rWy`HJMzNA|K8T7`0KO^%->|FNNh`(55z5FA_lBjmp7mtkq} zmUFRrSo+}LEM-Dswt(Nbr-hD0fOb916+&b+6c{HoSE;yADAZ^HOFrAJY#8}2`q5*> zX?rE7mNC2CZKv4-LzQglqlEEf}fas`C9Oo+U6Nf|Ca(KCH3;x z_;H%goeRGl4rQlp`%M8wC2KbFeZHIH2m$JaK&4;mWH>8CyyW1g{_8gONTpR}Cs)-< zDj^ZDuoBb_n7FQuW803z%W@M&2I{4}_Smc{H~PJ=Wa#m0@o>=6%4S)fdO>K>==&B; z!;G?z%MI^naC`jmeS+6GcmlTQ;si$q{8v%BC=zi~HIklkEP|7KE(mL%1fergo~ZG} z{(qtGi=LO37mXgovwwo)1Z9QqUO&K3BuH@|o1@9TChBm6MK)?i3wCOMZXLftH94Ay zVHGJidcJZnyBuPpLYAO!yxLZu%zLuYFp&D=p(m5XVr9c$w&Ht65iP2(fQp3u?5>r$e_P0PSZz7Hv0)Hf%w_WIA~N6W*ufREQsdab$7!uys%G!L zSBKMwKNPt6jNQ6C$l%C(9He zUJXltRii!1EEDOBakVSfsimBxCK!f}1QdcghgxtJM+2Ob!E|27kw1{&faX;(!u&p5 zbiOudF*i*$q9auHKvkKJeY#%t(e~iTN#I=C4{;IiKT79mx&T)ZLXXN8A*5c3OFv)0 z+n)~B@>w8z6$t+l z_vuw&@85a*0BHu523tK(97p~Vu}%aOtRS?a12BET_q{RMODe>ms{zd-V5&yODv8+s zG+?%3L?dv{Jk{FL_*Wer^_g}jPfQx{4$L%2o<*zDaIT25P^u*(UX`YO1RZHi;Cuwt zKV^zHcT*HUwD8Xa#N%YGEBG}(`4NqXEU<=Wdk^874ug-GoC@#2YR2fMiJ&IDIIcr5 zU){>r`BH6;RCd$wy5U{dPal*G054)f3F0bA<1A>jXAg<#lalrbh{ZiN49llPMPlRP z$hVlm1IKUN&EN^ib^V>b3figN9G1d*zcBpb`vXz+KtLnSi?ZD~f5_l65OTY!tBlsL zam)LA1mis5(bV!sW=zFeTC$lnJB$J&%TvuYK`{$8YVa z&K|whlk2y02-&jvq+)$IVXuj&o9OY?XvBso=RM}>UT=QiUPg5u)oS@E3e>mR&2OB- z+(%Kq$(tI`!j8{^Trnq{&x%s>?a5PR-_!o7h-2hiBznJEDd`&)v@Yd4-)fy9*K{mV z!U3iKn&qH%xOlN)XE_~dru%OMcgrP@FZoj*-zddwA;JzzIK*OC%2#yX)ozX%@5pUv zPuhEA@qby*vxNkJtZY`VkMvt%*N zEwvo8xsTuV#5x-~(&*ay6jAyAl+~3dJ(cN5%JGz>M zf@hCBTuJ4Zxxy{|BX?TJ?Sj*m%jSwg%;s-oA6z4x4Y^A{=yr(i3E!otPn{^Z4o1PO z8)vW{=U&}CAaLkp;vpiK z&6EKF6RCG-raA$2Km2ZVYl~eW( zfCL8tDjUE*1u4_!qlWwx&l^GQqm=Ba{S5+o!EAB9diblVm^uOAw{W$Ak@Wls54Ge7 zG2_s1a}FhbczjG!0wK(Oe`os8*)#eM=6;Eql)>lBE)hp>b^TS(?WYtoq(_45(4^LJ z?wo4T{~1>BmW;zJa?!q@>}uV z5pDF}UFf*oBJFfkr9~Y(?0Qqj0-N*^7UhLbP$qO=$Fg1U&xIvb`(37dxVKcD~`W$DTNfEw&N_vtoBc}lvYTAnVT8Y>AJy*Gx&7{9_fmX=+B@5*)D6EI_U6 zt8dP|na)@;@Tae~yDHv0d<`p?Ji&NV_I1xgVEMi!na;Dn?&bq2b3^?hBg*wrGNG-< z_)woy-6l)P7n^-XeDpm|T-_|5UFuYRwnY@2x+)!bp~lNuXZGx=nwhYp^f?3c1bS;g 
z7x+AS6tzy-)g~^Pz-n+*QFQrj7{7aBC1T3@)NM8|9Pn+T4XU#m%!x`xV$QA&G4*%{dszPj=)2!M%=rp(S7IGFxUJ>;0FRNP$H~c+s{^dsX^N!=H z^@iY1pO6jM--7po5VLPW9kAwleC;mZ;>;5h@~V@}#Oml7kF5hb6>$&HkSsIA%FQcW zIZDM&3x)bBmDZB6h>7L~IReq*VzmpB2(X2SAR813`kO2^4bA6+@s+1z@8*inu8wV$ z?R%u2uE&W<$%}@jf_yH}&tB>lA0}aImGN@ft4YqJd3G)2)y~~!`y;d%_A7XjG@al) zg@&_glTADEAAffiOG=B^<-bM-%v0gNSdI6_bpH9mGcTWTQoeGC=RfTJ3-Zox4a-;UV8Ar18Vz}iHixXLl z-e?u18dav>2E$J=vTOq&^C{r+y=n1q6Lo!qy)Gs}T(Ua^Dq=*0iTuXN{m#{;CD#=j z)3t}@$}Sj=32mm*Nus8H6>_k$eC+@;n?l@4c?Q2LdcJTE{@C5`QXxuPLbrptDrzvh zVk=&`PTf#?HQ^LqNszw-21d)LW+hMn7L?qCRxmt|9CKXg^4o`OO4wqBL7(PyRS;4! z*;V1^lxX5vEET}JWVRf^AU#KvdM;8civM8HzPQ-cWFn6iMK6&u1L}$eGTCXWkzEBf z-A5ij8WrGaKv{meacaOnl4BB#F+mce5?P(8!Ldab^X6HDb+h_bl2d^qpYpV>WfQak z=G>}#%s{Pqx_t;kc|XTiFQK@>o@gT!8zUXj2dbs@Qv}YL?|eSH-CSYng|1B=D2^Hi6=ir1G{pB ziapYM9Ouk^uU(pv*~UqkcqODi1FZQ3HXjR9+O>3x*UQ=Cl3p}{`&NCC#74&jdv8{}MIYoNWpq_ujo>Z+44)~b-v=-)Ldw-WH=t*O@+z z_S))mDJrAozw`>tE!#Wz4yq5_-bs1eEuQDlTC|_!m;BpnA${jSlC8vws|vCf^quUl zUaeE?;?wM~)1|JSx;1hSvsD_v`!rPHQ*C~(ka~HrUu|;uh9g|_N1R}uQq!C<*5duR z!LK16wePU&Wn47KaCV0UYjpYDgaP+sV;=}TWLccrlR!9Btn&saae6ahFpk|aR{Pc? 
z2ERlJRurMJ>>1Mvw(8I`vdse0$(%l%S11gCIhWIY&E2Huqn$S43PFJ+{I)<-Jr|l3 zy1*Iuhr~f^kbM&4mqYgBL-uMtrtZJxd{?ND-U-q)b9`thcKqi)DdwUhaU!2Deqd?(9P88QT-`{0z70myYv8W3H0iSlhe>W zQp5sELd4{K_K4eL6G<*O@W@+i zTw1k3=S90Hm!h_)h>F637xAH@)_Q(s6+mbjL?un@{Y^P$IrQ-gj=?!nXD`sp$*A?W#D1esTh*)u2OU-*-o5Jl!3annt9Iwm`D251z(B?T zmg{W1N;;vILkn5BurrQsb{hjfH)K?GoY?W(i_kl*i1TaSl)=B1KF@eYE}W@p)rtwj#8( zq-PlVkeAZ}i|MV`L$&bLtUPulRR&G@8j~m`8i$=y%?(y~&rjCdEL08*Ty57SgVC{w z*?VwRvMZ|cw;&K6)DiUr2`(E($Pc&re%VSkaBdM|=i3}{@EY;Itw8jKS-(f(5Xa&x z6EX&3F;P^#EN^c7uL#>!_wUa#J&$;I=Q%jXLt24)_5%P{t%@Y+Bs$Fe8G!ki!w`YxDO3?vN^&|Lf7p>haOM`F(XAoCaXyY!_ zmr8{$KU)ZB41uHy%Jp`W>TiRCH=$EzSe~V9@8OTh{MMneZsJK}s*csqX#-QUNs}!Y z@R^Il6BDJ2)79FD#Ps~Da_>`c2)M!%ERVU&(~1Q18aDf?ze-x&#%R=|WpOFta0i%Q z@~nh|tJ5_W01x*jn5Qf{!)@==-MZ{q zpEm+%oKM?>YZQ4u_@keIAe933)!jA4ZxnG=@op4dfhCFXmWxg_z0AZi#@SRl{^L7K zhg=-0Nrp9(EPGcW)9~}k4w*;Be<9>d>EV1^HeC5kLaoQK8$yDPmpsBcPXR(p4?B7@ zN4SY&u^WmXMX?U~L2vS^&F=41axXD8c<+bGqZYn6we*)BG+)=nzur@C_^*QuF)%oO z3V3LD2%J9*uVYEv_msrBBjL{ayUROYWfq}*^+3v!Y}FF_yv6R?bF=+|xXJr*uS2`H zxWc}75ryJ{|0i+04t&)>bZVC}T{nYaVS1lsr0;=l>!WDmwE76HNQG*_^Bmz%4Ea z>E1t`6G8+Q8l9pZ%pV9@}tRz^CHT|_#EWRz=7fAUqP=}PwT!kS;v zD?gZ1Vz%^9ck8cZx|u`W(33)oW9c|VE}G$Uak`~ML!uM9u5RTzQ~xfVZ7lI+k?SUI*Axl1n6|GYXx#L^ljbdaZyQ;88;xl{ za4@?jqJT_codtY$Nw>5Xn^FKl;W-J=2;!YAv{w{eYRz;JNO(7?qqAB5ACYo)( zYn8F&7#@BJ5QaV9XQ_{2cN-)Kuw5uz1Knkht!i4!NjPMp`LDG0Z!8!yfm|}ImO-vE zNOTau0KzU4T5b@;ZqGK$zFH___eeuOegS|J6v!bUE(uz>nh;S5n!kZ?-vrm=-iPLS z!y23=Q$7*6_Xz#QFd_>gR6z4C2avOMjB%(z8b&sz7dsOUvwFn7f4_xWAQ-{J3cwGd z7XHGLQV}j+vL=Z?vkqX`6XjSJ{^NXAay4J@AVxz4!58a##fZ-zar2t3qPD5 z9H|k$1R1L(f0s$WPf{8u=fyVuXE8NUu3giIk6JRbaO?=QjeL!_w*wY)#z^vgI1RnE z_1i|o?HK{d?23uYm6d~MQ-MlstdDFt5=EY*E^Dbp`TD&s0K(x;kR@rA;`$uO*9Kpb zE~JpCzZjZ_W6Z4`lrl8=g$;D5yQc!`Ux8TUmfDCV|MDdBNT@KCn2lNdg4ORQ@=(XE zASI=aD>Y%Nq(BR-V|zCRaf=A6&fN@DimpOe(YG-%=m$C6|2Eo0+dN?_`;`=EJ{WN< z%hM-9k@XBlwvu7v9ohuwFyRm-uKnYvf&JyS77S8uIRO3XX3%ek{^1l3V_E(p$VcSD 
z%y*@3#rIp}(*$-sn>^pL<7Lnt>t?-2RK@1Wof4r+X-^*kZ`s)oUKOe$7LnTJ?{q@2M~!pUfql_7wZp0O=h5r+EN#C(of z5dkG_s3!=WR67oC1~HoluQ`g5;E=AW`#Pv|KV z$H6X?S>^J@o)+-9g}tB3rQT!}Rww8@t-*nhL;LrCV=}D#CLvKk4KaKThsSJ?T7N&S1*w zxQKSniQb@y3&;!CS+Lwpb{8IKdF(Hg7bjYfA5s(ShmDpe=284fP8~CSzzRjJ8~&mA zQRC6Fp|je(SI3bezqV~7QAqOF&;Tl)8;kG$TJo1auEcQ)-$Z~YCn*l9i9v-p!xF*9-;# z%w<9LnqL9$uD5Wv`euox)RMpBJSaV3AK!|920b`n)ak);|B?S{C}k!_CnG?*a`SvC zs)!;&+dWk~JGMVH&EOFdBNDzXvM|y1?_X)5XK{)mTT|H_hypuCJkpR(`GV=r;E!J|Z0v4CAwz@)V_4-jxT z6U1wK`fs3Me5LINMyu-3XZ(9IUTLw;>0u3-Yso_<6e#!ONi@?!p3NUgjV6sHe(C)q zSFMFpjd;MHzWm6qkUt>E;m>(~*GLigpN3~ma;q}ix| z^Q5eZeQP5U^%C^#eRh-U(28(u7q#R4C(C`Ncs-1}L1w=s_qB_`-M4&!`y)?{+a^Wq z_;@T15^bc<!kf%}6c$7^(vgP$}eXNWe4#+&n;txE|MQIT}(tr?&@ zKXH)*K(sV*LH_Fk?qXEzbaE-6H_lrC^MG3-6#6j-7df5EauQ|o0O5yAClLhGBA5A1 zkjQHsDW|FwZ_3v=hoDZeFp_a(D5Guj?z`!?j$xw3Zt2^A#sC;5XQVqIHwT*1c@Ouit!!_xD#@iK_d`zx zD83l#z8cUeDd>>x4U-2nZMrxx>;fplFqT42T`Yk_F6Q=hf|TT}#pRHMTM5v-(6hUX z?zxa>V-EpAT$&txaC5ZKZ(G7fa<;8nDMSiq%OGtk+a7?1X?BwI`at%Lx3|0osh@7M*1i4HZY@_Oo< z1qCD<__zl}8B35&kL_yELs65P9D)^xU-63*mD3gY6h6Ay7J4QOb>ohljC@No)?w*) z{*PI#0PmJm9JKA@k{>GUHBrp$y5=>*r`f8w+D6wm7(O3S8lvy)Db8R}R$WJCMQcz6 z?=V>uhGwt`81ucyIbRKQ3M#%rW*IbuIOS*7gs}(b_>l)Q%lJ##ott^SN00_Tcan(> zU`Kw{oBe=(>)Rc=AhT)3QWxFLPZOl`BO1M~GI!9A|CiXG@fFJFxn@bQU0_W8-r`Vh_|LSg6{g*sV)fS}6zPeM@TL}IN#s5Htt5nM7Wi=#D zU3wRhsjdQxiG1)5a)>A}e_(nQY(<2l?nF@wuwmj0fumg+XX-1@!IFjoTmHSuE=%|J zGtZv4wlSn%t*A?#P-a6)jAFFN>z;i;OcZiNI&1Wk;1-$Nj=ND-tCr?Ip;H3mM+3P} z1=)9oR&DkTE5_%u8RuUE+Sz^xb4C><|ByEGCj71(){C;SN!i-i@_9AT;?m0-+uC7~ zEx(k;iQ^1x1Rck%svQ(8`XWdU?GhZ4Gxrxi>Dp!*-*$V+uEb`4Z17T@wxsR>-aa!HicxW&R4R9q|#w`gA!+%)&8qvhf{3d=Q3FdJEuaKo+v}! 
z?D}Tfz~{lj-|MPtL8IX`Tl~CJZ}aa%_O%?F4>r^DgXitdeC&9sNUE{JekFM{1w<>6 zm{AlLecjb)O!p`uEBQlB^PG7PWs#5f4$r&%>qBqdhROkN+k7&~UfbY+l&zcY+2uOe z-(pWs-WnC@-%Ylgmb+diq*lTjA??D;M6&$M0PJ(4=euJ3dI+Ub-to3Ko%h0WE_dE; zhRrX%v6e2xP+^}I2)X6-{QJP2h>C|}8{V~^n+*p*o4ZhONtzM)cd2>&9F%v6dh<#Rs(O6C^NY$$FjIl`RW-+Je8z}Bzjp0eH$3;-ifK&scp zg+UskHk;)*EQ53r;>Xb?2E=3<1sZ?t36!zu{$!dQivQ{!G8OZcowq_nFEWBMUsLdX zNpwFQjhj5dSivIx5PZH>7adU-?~wgda7eqfkMB^N69<~7)`ba= z;CJdIt)%3@<~+Bqsz&B0{QO3L17fF{z{w;}i_11XGBr<_vA**0yg4!Y zUgJYPM+!ggb=!r4%UVW(_|v1S&h@mH@HHd=)Uu=O%o-cL{|Rs-dX7AnKGOl(ozMRO zA3cYw@fzG!n)z+4@d{@3#}j;{?1F>U^97W@fERQna}(sfmx`#&9yHn5zmJnk$rqA> zKIn7YR|Wb2Zhiw91TF4@tw=pemA1$GEtRI&%eg+2*9SBK(<)BDZ*hL_AW;TUd8wlXH1bD5!|1k!(w=;zA^b!p;fiFg*J6Qf^eYcdwkoX-+NB|RrWC$(Sr}+Y>CiV4 z=6AVxUoK~bb^5p1b0m@~^am?@i>h5FSgJ3#osI5wVj5?mpd!pZT>#`pkMFb<@VR5& z1K=LBZ?=#3<4<^QwH9b3zf;LG!tBq$_u(~W<9OVnGs}(I{>{0uoC8h*V_#RF+ZMia zkBs?tWj#+-(yspw+Es#k#CX^zl%Q0rau<>Frs*QD1L8sr2Ex3kH1qLmvc7CbEXUUr zbXM)!Wi^MBHm$m^8i?mr(G&RvU%>IwVxkwv4VxX(4IzhJ z%^s)f)4zVrvEqCEQ+T`I?IKXU<%7fBbiFK9)FK(K7kh{X^z`)I&8V z{?gm~tdx&x%%%2d8XNf|s7QpworY^Hmc(rPG(by0hXnmfx46q#k})#O1r}B7tso_kO5VSy0P0W3#hCArL|@?9m3a~ zA}Jm^c!D;k_q;6@08@*CLI=V<92oI?s-PMORF+`xQjM}SUZhO#=1w=jP)0%61-ZTh z!;3sC(dF|v$y}jkDwxKGdVrQtaufx7Yg+V^_FwSMbg@ffqP3_~f519L%x@xQR8Y50 zMy7L9imO8-S*TAA`qa8kadWV!!;3sz#22xtGF21UJoP}F8<`;tp>dQ$edkINk<}3i zlwL03;bs+@D+(7)sP=f^*L!iAm(phXKrnkuVZzq=%^lxzFeSe8k4a^E)GSHPO^aEA zo@+Ou5=4FxfkNNaoGV@?6Etq+SxlZq&=p&lKFV^`H*%=;;qeEz;NMm9G6Y8bY_AQ1%%Lzuo z6a!!wzC?Ou&3mjR%(J^LqOkGdd(KX0kPHV$S@FHeIT^;0mCPS|fL#823BR^U>4$;# z3m@-rjRV#A4EC-p zY0dTZ=fAo_^v8n&b|`c9Fb@{_Ie_S6WfhsHyC4GjFj0luWod6L`ju1@u9T>y+q z!n_G|^7!kbf{XRin?n$z@9&gOhfigDGcK)(fTA#XvPcpTyv%S@ms(n>kZ}BJaNk^b zf5?wJT6kshe`)~`pszQQ^))IWh|e`lr;zaKlXxn+QlqcCpN0?iUN6(}UmtM{*R|hS zy5~gk15b^ZsM^!svzhGkU`4+TcD_5g%e9`vrF+;Co?0*iAv)NSJb!S=+5s26xH(@j z%T_Y#8}1#|I!?YC!2NA{f7IARxp8y4@n-;{5Wog`?(}<`6WN1zMYaAK{MKl7kbx@T zb&wOZ>-%(&zs`kx3=x!1vs4R5Svddw(MpH!Ty5;#EkMoIchr$=8IVovq;RDy75OD{ 
z?^pF60S+a9KkCgI_QWP@Ww9bdmhB!BeNih z0?`9VP{2nztpI8_z6_Ku;?ERh{4P%T7dwZNwdDY$IR&Ts!mBR2sE6vE2&bf{OwZAk zNuxYK9CL>U&V#$!ovWhIOO1p18YvJCNZBkSGx6h_Yi0;bR5ImFC(0{@F5@O8CO&4jWw3a@%2O{Mh9G*XC+M0(~v>Z3gOc?y)HK-~VS0)p9Mu-o6F z?HXb2sz{{CX&x#0X%Vr`K|F360DDzu7Neg;pG4V5ZY#|JK3+c9UQ9U ztBhNKlDPa?;`!8D!h(M9jjw!SfgBt#F%WbnOng=UAv4LH!U=eIV6#m63^n>*sxEem z<<+I8gXcqoT&_zry^QHboAa6)zfrQ%4Nd7da(?omJz+gf^{)&Ru(Tc0Ye^(@rXB9d zPD^U)P|y^cm@px*;0wTRFIOt!$3hho1+lnUjOHGOqA`EX@#f{`===|Z38=c9pW44G zr`KJ#hg|KvnTfm_3Wg-{D=iyl;ueF?VXnPP^{H}HvGV+y|S(8d+U6pl6rntTeev}fuaI?4h>z} z0}a0QVtbR*0UkN!2OVKwo_r#5nCH)jN<(D`j$GZsT6VdEX`5A7{=X=CB3f}m^rZI& z8v2<8x}}%BC|QFXf_K_=yEsTZkM;UTrFhTO-1~;11T9J+39JJyA!8D*!JB9GP-@8D z;-pC0(vqYCi|VAkZ|w-6NU6*M3z=#d0E$#Cn(A%4z&SPX*N5YTwCPf+jL6f!)qQh% ztwGm*4z7z|2|R0^8JiB%bY4tAj`mxP<`{Al0_Sl0LYMN#M_2c=vZAw8wrCqg5URcz zBFFyJ!y8^||N4cPhSx}h?I-E@BN_erAGWT{a3%yJIlGS zP)T-OK>1ydIG?41iUJRDR@XD|B&B+&P*0Q$^i6Edp_q=FG6FPhd??7ZAU4!z@z1D6 zl=(3qFP~}NFd|{W0Ngq2mcnaUgXh&5jl>HmKl+JA}CT}J1ohZ5WTo4 z?fQ6y1!&EIFIR*5?ElBODpI54$HP)zFp?2nkRTC8C=;b$Eq3$1voWh4VaLy{@fw>4|-DbW9*?}C7!NVYhia@Jm`<8TbT535OhO3miWF!*wI+(Uk_KTmMkt~uq(Mh z7ElZ29Vi@Z%=tjS+|RHYb4~LQx>4?y*mt(LtP zW+hv<6vt^$fPynS%Qo7o`zcVvCgu9;V#!HM?{0Tex*hSDG;oZJBGTi_neP`!t(+?N zQs}x4DwqtdZA1$zD2zIW<8wj+=rTxd?Y|0up(B+hOK;|zo?K02I;MV!oQlQIri^}^ z=n&2EK}Xt99!-4=#eQN~@L8bo^j)zD7d$7eAJO~hJDM)=-MrH-zE#`(cnUzxZ{e{?7Wd@HA7=uH%>S6< zSyXd)DqbUHFeSSk#dSmvgs5A6dE9f3=eM)vZF53n&?#;#SqTmUY9Z5N16U>9S-V)h z1=qp2Zr@Lx#=&`73?`9uoiSPmfA7 z?i)h{cz(gPTx3+tDM4lWlq2%rPyacPW?n)LTs{J6h>N&CmByLak`&+gJK>?fi^!$e z%WLe$))>md7*o3f_~`lH z>kyVTJ6ayAX+hEkB^SrmvDfJ7W)eAR}kQ@Fm#T7&u0Us1TYoJ_xXYY z$T_5Oe~8;l;jVdQ+;E<15>pc1mYe;*CMYQV6=tbaDJV$zP41vD`YXDkO{$m|yZ^|Q?M0cC$_T@DD=FJ8FtUz3Y)bKYxQpcS;ci(P7f zTN7z;Vi5*x+~CPtH?8l}0~!V1zW`tZ=bH~mUp9~?3)sLx^AV0%KLtNA(G28S)t4!& zO^(ewsxHRWw5qyu!}t;}9$jtkBA%_2hTcpWsC-)S-Og7U_vMx~jbOl+qxF8y_#r0x zJRivvEoKOma$@(sb8@sRQpx+}dc3SG<3ZP=U1N3z=*9s69|fQ-#*%(d0$m5f%XRMPCMh3%lb|e5)Q*m4MCgsfiE5{aHRucy8{V!r->B+ 
zMEP9|la-4UuvXXBhtFy2Si-tvdJA8zTmkAm=OgWs;BUR>U6v!aJkPIyKrJCGUS_>~ z75FeXVD|{_po0$e0jT@sFv8aFmcwL?h!*R|gQi5YDc@45&MO7Xd`tUWkox?pBE*0e37)_I0FUlsEr*{RxP=3d5k zkgwaR*k7#kzi+b*|MTg8p7nXnmxq!PZUqp=p@w{u?wYVy75>)ngO>kYEbtmH28{U2 z&@_(*lnC5i<%w@nSa^UD9O_?i;)L(|3*>{9l~zA$v??S__Sn0PJhBG#f^#5h98;9a z{yB!3()|ah1rh3h|KNm=ez)jnE%cw2h;9me02ODhgUB9p>vdl{Add~ALTaiBYdUBsSMC`EW*GFH&B(r~B zTd!}v0Ybv2`)(CBY{5YCKlwsGVjldzpZ@Ov|L@lmkbfWw_Pa7Jmd6v@$vv}PnpI8s z3>J%cYI%yPe;IFiu@%=1;6tBz*H`*#HeYz5|L4l2{U)*dT5y4zd&CoQ$oD?tnxyy5 zZGgz2ZrU)L!Jn+_EK8=~fN;d*|NB4w|9`B?0dix`JVNXL<;APe{-Rx>3y4%RZQt4d z`>SCf;6ogl54pf@6iR>F{Vy-IO5lGI@n5<4e_lmkFL&oq&+nB(%1ihKBfTKvMQ92w z8vl<7zua5O*MP||0^iChkRaB{$?3uE%qvE(m8)}UU7R-kfB)K-;QthI>y~{f{H%fpjZ4TeMH93E` zyvOfb;xSA0jB^gh8t*c11(T)4KC(_Y&_5Lc96y z{j#ci*}1>n_~;%>Ue*-<6`E#HrEuj`-pTI8F!(1v z;m2NsD;a02!zZz9!(yhc^OwUFxcm^{?hzKUQEQ|r$APmM5!QO}_J13Hlle+Qww;HH zf0YARCcqeY#gp8jj$r*+chEb)JLJXRh43dKcZf8&&9l?PbjgBe?W^8c1N837EqYif7J zeTnGyCvO?qS`-=XNkg7O@n_lB{R^t!naxzcJp)N~KFIoJV>eQX_}Ys{nC`M%!0{j+ z0PFfm>hVGLe*(-4`Tg{JG<)idlV+6|7)QVaojd4%<4FJZ2L{4YN|Av7MDyE)M9t?vM@xI7o-mZ~&1MP)fR_Q%aETl5QjpDBay5 zjda7I8)=Zve;@R||L=L<=Nsc2!@)rIVK4SxGp>2fx#oPCUeGl*S!|o-u74#9ZSZ1( zJ0Vf49c^a6rQ6ePC5F2SWzsU!o!(~yOiYt3-rpBVf<8?%tz-Fg_7y@ea97$S2X&r* zL>R1!u2>nX;v<-bG>5}(2H5v+oaVuA?ZABUCh@hdv!c3s>zUErZx!HbnPU#Gey1aq;WcyVfw|U zQ|2cE74 z_m4UQ@_A-JF+)!vMGtIU@Atm?e|5cF#w#OxMSBV(m2eE?Tf!v~UI{p(rCicM1Qf`TKp!}C#n(R=ZP^S1pNr5f4yV4jn4QbD# z5W1TYoU2fg-L*K?QQbxZbkE9m*n(H>)|PY+3xbpJVdkvxdY2U)tpuJQ{vX-&N&wVh z&1k38N))DlL8_-F=@*(4=}vue1!g<_ioZ8GDyu~H;`3Dj_$0BAzM0i>F=m}-{^FKS zW$o5Q2Urz$@j)Au)qOVU{FHK3zETH`EXr(#UXom|+0gw7db+-_$(S~*Zf60M~Rt$->h*2w zJ8hL!B?FuJN0n7D1Cn?bea5KR4%w;d5C1Z?g(BL(3dEjB&?NT8w@;1XppLL`X32(Q-DdRd*C!Yfi3Vh zy=3*6fiq8km;45(#Lu^Lf@HwP^6`7Cp&i!bt1(C2I1Nwsn(W4L4Nv|zYci~Rqt8%M z8GW}xEPL^d3bx=djgukMA1NID0w{Z`@w~|=X|OZ)F#4ZA1Nhn~2XlRzfmWC>{S&Rs z7rE{LTwkT8vBn!slYnHAh>W6Jvl`{9g*lYTfyA)N zSZj}DgOujjptM3&o*L$5sWnR`X570l%OqJUi2Edl(%yW*JL;1PJZ(Ojf_7obj@+VFbl%vi!U4F3v 
zqLi=k#uF$P_e@K;TY0$npbaYVOD=y+BrOWyYXx4v-GAZ@CIA9jsfu!?zuJID3ap3> z1ld6fXotzagEK}UNjF8wD6fT~642yoc^ZV>JPL$Hp4yDg@)MFPFA9Vcl7A(q6(*MT z8QtaosFpbFxhVNr6vPqI09)0{m8~cKuPRUiW^4`4U;SC+D*%&4qa~;`JONa_*!HrX z-D=O`cb&PQ)gH`%0FJc@vlan>NdMb0Jzzj+0}?z;0O8P`qX7Bb>wH=M=2@g1uyxVW z366i!Awb^50e(T9u4h%cl?qUWedjWVV?f!>kEcHu8AG8KkEC^6O?Mvb$A8L4b_>!- ze(w(9EuD*{@B0dvgkojJ%`p5CibX;|Lfsc1J9!~#0dI4OfHd^qCD%ZJXSXBbQFsk( zY`FXqF$|g!%RXzN0(>5s>pE6t)dM#9AJzmw6w|#-Mc-STVX!6;w%|$N{Itx&g5KOu z0cFooKL=PEJRL*_gnwico{bP<+S@m&KkEb2KfI#tA>pH%e89gXsg6x1Wo!%?np*rf zsPdmU=0O|s%8`ct!JiyxfUo5Uu_Hl8iUvXoaB^JgAIwJU-HHb&Aba))NjEXCsT2jQ zIFx}kn+b0A;}O=;$x6p7JUxYZyrrfe*evl`EyJjDcmSUGS8^U~Cbm%gKQ52*3GiH- z!)7kz1cz7vj!5G`rV#tbOfP`}!-)#&f8zZNc*zN`Y73#nfT}zC_f7emS?GQpQdu># z_}|F(KOK`L3@eQ-(4qZ_L|Q`u`LtxKjslKCe*o?pDOX?k(7gjDqqGGuzwlg?S^;4n zPzC$$(5x6qK-puuCoNu#P3lxg9Ye)68DRF~Kjnk#mM%#3jZXR&TdyHSNAiP&;uZA^ z|8d_JX)p=B#wkO5wgkLQ_=MK?DgR)3T1gnR{#NV432dyr140QN0L@C?Ke^n46vYfp zGnqh&nqb)Ee^?Vp%SF4qjv3u;=L4*H2p>dtyHx+U`0r#eWgn;?+K=`+B!>|GQ52Z2 zBmwH6Q;jqG2&R8NrYfFi!7VC)^XabyI0CE}p48%+*Z&2o{3nj7da&1Vbj~_|#V|+| z@U`J@V;iJsBVke4XyGo=UoV9e0IcZEK_{?62#dlDa=A_EJqxnEqavK5s_8vczmjXI_6L z=AQ`mUn2&5V}Q@=e5hAAuMXwWzwBqG8uFE<4MKvd6ed(!A0^9@L&2(zJ3~$0yCecY z`_aFk;}$8r_wKp6p%=Q=+hZTN^Kk7xYXlQkt+;uW@@DSGipTLAKyaH(`gQ(KwDNCs z^6xi2)Byirj4L@Qh7b&}$?R|6F(+8USgx<^J3!SR>~r2rFgk*_x<5D#h(!oveeHoF zUthz%lzntqtw+k{(?6g4UtfV>3TSEs704;|+JUx*0>H%@3i zy+RZMP#7&mn~KiOs64POT*8Nmto9D9w^v^^scug^Xjo)V=Zth_gZqZ-jV9_)^XCXt z3xLI3#fP+XJngvU_eRtoif+7OEsg;rc|T`}t2)L=*XhvM#6ai8@8JS!i5r~nZeT04 z2Dfg^Hx%t|pNrrDxpse_M-;_>wFE2!Fnn?EZUpO4VIt;*KaKQ{h}vzE>4yJ91#zRq-u0Gs_z}hg7j25B~)|ZMbTZxdZ#K z200C|(c~2A+J83f5EmeGZw#5Lx%mL?c^1fI@z*>8g`EG_9WVW~okQ9qP%Y&oYr6A+ z5J&a)BtdV6Yo-Z^(`Nj9RYd%Kew3S(yNUxD4s_H4AbaF%q;MASus^gABv3R)uKrAs z9}JC?5@Y@i4FF=<3)o~vuR*2k7l1sBL2uHw1^s=}I3?dvh;t~uJNle&2Zbta9J(29 zv8C}u<&aBdKyvz*FS~#c!-dc#-y0x=ep05{U;)3xgHHh@zaFIj36b;&=4dL6ojQj* zOaDho-T(}n&1_&1CN;2_uZdF2S&*1$i^iaP10#yvP=AT!@T5nTmN^svH`t$A(&Atj 
zSzqjBE3&_syZx++fqZLTH(vQw?8mt6rutu`(@G4yO|4~vWjGkl0e>r({RijJ0$YXB zleing2IGh^xw0gl0)#du*VokVq(T3QbBHiTVV+BI7-owUBVl$4=r#w~sK1uzdk~Oo zVMG`j`~2Sms0_&8018mSKVAz1S$p}|Q9s1n?SDgJpmGuha+E-c5Al@0&jUh+IW6SB zCa4t`*so*XxaqF{>avLX|GF&Tss2zi0Lu@Ho)}S%lO>|3sG##1LE-zRsk7~sLg+Xa4RxQJX#G#z%cRnA29eK*95>| z;IAbV&l2SV#N&aH=>-1|VDo>9guf4r!2@;?VDtV#+Y+!R!go%a{BIEx1~S#(hzZ~p zAKVnM9*n$G!dm+;7&rs+5N?b8cnF~WPiXsT5wb>O<~7N`WQgNK8za8{}p+l z?vV~iK&hf1I}_OcNrS++7?=dW!X=K#DdyyS3pt612qFEE}2zf=r(2~F`CF>g${_aFz}oDFZg8uM=2)@z%{TnqHH1k`)E4!;0#DYQF}FV6rh|BarLVV$OH6M z!ixPbq#Z!+JYXAtpNGKu9cb$zRFcNWpyqdwnk?3~xJzeJV`bEBZX{w-Wgc^%Yjmjq z8mVZ>3tz4=PsROuO(}E;oT`d<+?nhJl3MGvtKl)gc$gk_lT@Q(lPYWK^aUi?DKIA0 z!ctl-x1-Jzrce|$051TsR+Z;?07RCp)<56&&A#-A3N~up0VMjO^mz1 znbe&DGM#7e6T3EP24kR(1@-oNsHNO>2j|<~>{{Y#cwD3Zv24aWiFMM|8a>sq1*!(4 zgR^<#7s&E#%sCr~W=jriT=wJzglRdhba zDaIuJAra487NC%V&eL+9jv>vie2qm%QJx9!Ugujn^Lz)`qpHf9tMGuw&9vnxMf9hM zAd|wNYjPd+PP#*dBlDx9GvrQ%l~b-sk1JVb{^jc}wkN-y6V08Bk@Hm^c&)LiD`Ujk ziBOkWw7;Bb`uH^^njwLxMET`hS-IIxYq2}f_5_E|?t-oN)91&%O5C2=3tUHoM}en< zAEVcpbb3w&0=J`S{CxTzvBbH%p2_suWo50AWvPsDRn65|%K=J$ph9JY!eod3Ew9Ydaj>fo9o)mS4y$r_01&?oZR@ka-${lq!RWdcP(LIi%f_ zS6{jhQLDFDHQd||iM-3RB8k=PGlG|;Qh=ld+7By8bc(E^{W%@&)Rut?d#$D2bmWs7O> zM?Zj(N+4iJRg~3iwf`RLyYE4Q*Si&?^^QAG@e6|V z&_wRrb~ltI9(gCaAVgSM1JHI(<9*yks`GlBnhY@GAxl1$U;n!Hp%b{r6Tpn77se%5 z7vEf=Ch_gQnJN2d@d|OU*r|DZfBIEOx;DK*x>DEB@Aw{6aovP@&1%VAR+X|8?%I;D zSl%4Vj^_RkV62nN3$ya2)S)LhLvbPzT9a!Ub@gv<<*+j|+MWVOGnV(RHMOi=)_~EN zo5w-+GhDDXwZScWYBV?3J&W1*w?l{R?WnjC4ktd)iM4>ifoBnqbq3^q7UJgHXN4-- zjzw6k^{E;6Or}re6@Tqh`n?`KsxlPSH>+iX2>}DGOLo9NH`8{n7TXq zd53-R#G#2&6A)o=eQ7Mn_V-BkwlX&}o31v8ixuVD z8FdFl7wEa|wQKxNU2RW1pqvOhr{kd!a@Gc2>?km0T@TcJog=TVQ|CWz;_=bXBn!M3 zWY#9HiFsqe<)d}tmYuFO3p!g2xqsPV$(19Q5G?+>7wF3RRNm2j4CUDKY!G44{F}w< zrhjG@um2O|5CI~V{myx(qm_N@zVh0J_LY6bIV%#TMi5h2;9DPhoUGIm@^iA%)NvP2hyE3v2N_fr35poSqqqJW~A!@AR1FqFMNV)-5 za+K56xK#62uJSBGmBA}?b_cgzgpeg~OcJwksG~y^dQRS>(BS$BgWo8r`6?eVu5W)I zv{Z7ekz#9Bg`_p#v@5FAmo#T)s#mdcPxPAn){}nvCHD$S%HFcGQ 
zQR%1t63(2=ZSmyURLlDW_-qQ_N25h}o4=XhP?*llr1F0AsV3d)dU-bn03iokX%%BpAg3P61l7_G}JI$#+{sx%Ki{h;)CKLpVH3W+m<4@ z>o!abc^b~Nm#jMN3aKgHK8lMw8&evTy1Tj0qO)W~#W%xpI^>Sn1kX-J2)ED0P3rA$ ztH;byry)za%=zZCW?ZH_>PPz^$&b(^}R6YrW2@i3wiU&^PHo zSjep-smiYO8e@MnDYMkHI^P9~YIjQwkDM`y@B(8dU#+CrKkD<~Gotg|nWyx%iYTSD z%3*lsGF8CTxp%8wU2NPCOFwiqzc~${oge&k`*(p>elFkgRIXZug&{1WSJV8UgP!~V zWQaV#tq#84FJME_yK|ncGb4_vR4Eo-NPE(`^-O znb@6I;^Y&SA>J-*-&G4yqUHIG%-%|A6Z-q`CtgtWq zy+S}7YxX9v@*8vKZ_?w7pJ{rkQ(9TY$NuV|PQR0jvyu$GS1qkhu~mL9+KuwAvVI1s zSK@F@pYPrgd28rzYb9+h-rtFf1ZuIjYUbH}&w2f=Ua>N9M8d@+I9+^j#D}`8g;5g% z;VP+Xnb!nH>U6tkzaASitrz4@I0ays{gEJiD;{4V~HwrF`)KjXuczj(PC>i z`>6pBTo7ZD4=s32fWgx^!#6#|3SG%l!m+zLCpSH?`CvD_E=s;=p6%xoc(12Frh?yZ zwbz{V&Fmm(w7#rt8u>!4ktkaDHL0W>yzsebpqTjBnyN)^2eqHWvKlPvE6`TCGI0Ar z=CnCGME{IZ-`nR*aP@O@&1u4+j}KYMW+sL^Ij_U;Jpt(A?0l*+vs)Q4XcGs7CI#rr zx`WHPLR(H60y4*-*1QS*ypa8y@h}d_tIRr(d4I0u7yD2~eRAS%548PmF#-m>br*y5 zmmAaFYE@ZQ{YUH99NkrlhT&aYpZO{64}U&+4&1dwt=$R5lvNP26pZ+oyHw(n2NXJr z4+`3nzxCJ>GZB{$b|;{heudmelr=Onw;9KkuyuleE8bL#zA}<4p#+jhj?HWoKtJhh z#v+J~2;>iqXOreI@Q_Dqaa;b5AM&-uf{so)YCNmm(kMSD1|^&`_I4VgH+&q&{b^HP zoZ_d)*U(YU(-snDxq}fVwO@}2X_iplA9nxn1^EYiO?!39njS5<$fQeTC=UbZ4llN%RE zL4+izn6J$EmW`HQjz8zkSJgH6cdbY+z|cvFG$9b1I%n!2QU{z7)p#%V@&#;WYT~rd zPZTif3~>5Pp;x_oC7|~_mXYfI0$y}Yk4mhX-*J2RIel=i6)@WL1Sr|ZOUFZD4m$iI zDk|C-^e7-Iu%0l9l^!s&hJL;FW$I3$d99>k+zc0J9F2P~dqR1@GZHwc&=OsdkZqs2 z?HpdCoqA7<63y-#-E;7JQBgk^EG#VUTHK5k%_?(Zj5}WqkXIPcU?#GVQ>Dy60+{+; zk%3eA9-;?Im>#f2!B}ZWQV8?zC-=ss2D-EwUmU`(yO9xfRAA89v^Ib6z1v?{2U^;5lHsP2sVil5*0aZUfR*E)P3;=&p{ zCecZD!RZn7>hMZCdCmdCOA8jbhd2=lymgC0ZMD&v;y5`zvS-1CXs2Tnis$_nGSm;~ zp8UmlcFn26Y0jHZ*m?gvs^|K2Q=k%Z%?p2?QLMAm)qs3(^fMke#aNNGaG`*4UEk~J z^|xGAq!>Ch=|-C$ubVOBY;+SardSvzspufAoOvHTM^tqtff?rvf)ytl!p=^cZ#Bqr z6*fMw#ibA9e(MUNPq8=deyj1~6Z$;SM~-Q#*DOI4eqhII~m{|Ci%jAUR~(E zd13oOQzlXdx7%@Vi4RIy>D3_adz6yL7~Ceaa&&rKS)0nj{rDyPXdZtlxieaZ|*?Zv(jNLD+V__T{ zDGqCvRMqy#AUrN0sUl~&VqLWK_L(Ep*ZgcR;&Qr1H&86{KIP(EXj>_z<6~#N`V;6T zhZaJ*`{jb(28r75#Z`A=1IzRT<^m4 
zSvrNK^!m%{r?_Dc@H~IGFa6Zl@5vBgkQ7kkI3!cwT1k+(1_z;GP|vt{(c_Z8VM~K9 z=(^0dV_-BpU->|ltx5BRB_T^2Sv#iNOJz61$|EJ@Ra+C@nbK1S(`oHQ!`TU-n39W6 z+|C=OW&835LtRwd^?|}e8ifXo+eG7mqCQuPB|zJKQNz#MM$IhI=>dZyv!+gsi|Ihu z!Kd`Y)YT>CG=xNgZofERkQPMzu;;|DIU4X}_glLC`XkBy)9wd-2giwS1TxOy^0e2j zzgO5k<&EmiMEwp+~4(#Aj1=$ zm33z!Y*bnc`smS%!rPOiOl+@bCe`+L6vP5j!GUs82MzmIi`H<|th-QuyR6ICu?WmF z0gSvqAE%vTVP=lMn>JLq0#m-1@uMKJoGPYz^VPTk-ryaoC+VAH?mP!Hq2NeMPPDB2 zp~r4}Kz!eUvpJNWQ^+h_Wj>6W__6zMWN+H`H$Oos@Rsc}*GZX#~n77B@^?5&VnSKa<4;BR>0TYtnUvy)&l2BwJ zrBx^r#7?C5)8YewBxU2-FD#;yHBu=njJrsjEE5$K0+c%}S%DRVqC>q-*U$1A&<3X%vRLip*s!rUq}0pU;NvC+veMYDbD5uwyKtBHyP zY8hJAMm_M$*EZDM6P<|sO6-&9+NPlaV3MUb)TLlin|ckJlA?x<;%`kq_@$<-)gkp2m>KzEN>sjxj31MU8ZpWm-43}~_6zRatJW)!PbAzHkhmR}iC zvoV!&xwR=mHjmH#nYt7B{MPgSj%MH&uUKw9yv}Z=)O{lVIF6ui(>LaQo}^*b31&sb zQ>lx|rYhW(!l?+1Y{IXWA+}|No!^RO1G&6Z1Ci=Xs^xTEOR8?iSoogzXAfn%b@6;< z6BYFgo&7bkYsF+{FFDw2>9xV z-2RdYk@0iN(3`$1POP<}Dd>q<+2VP?k005~1uTlgg z8a0XP{OnB7>}G=&q1#y3Qfs#``Q_OyU~8h~t{R5IITw*c)3{>0D0DXmUBiLd4)f z>4BDz5OWogSyW(oaH`0fQf0;083}o%Gg7!5W2#+xS9A7;y*vK ze!zM+60#987;Giri@+G%A=Qh$(_)IwGE<}QAvJ|Je5HG|dxpwnF#An+gvBiU8Wkm< zk3k}u;i-0QQ2d?=;_-IUPP}uK zVd^gzLytY*XcnWhH&H+mM#>?UzmWK9I`0b^S4jVSzo=q_m#8D2j02^Nt*RP8@xMyU z|9rZQPapHaSA#%}-S5fr$T!LL0oJSpHeGoS50A{kB)i9pyiU9C->3(dh?}vOOH-OP z&o6K`LY8K0Y-Yk3E4g!;R+H8nlqBDKbIe`be>kc&H|N>^rx!r`tZK#CY@hvRIirY1Xb_){EF-7IaXIQwS(nak`?D#nt)FB z>>tg?ysZGm@QWi7YMlF6oQ?p+NT?Xbo+4glIi(WjOXi#_)L3EqT}; zu8-k0lte(wym^kC=j1tCt7Y|4O!t&+Qtm^7jbI z9ph}+d7cRZZL>CzT7n_CvlGVO|zJBlbOhs`#}4 zo^5tdb~@?ns`osy&Y8H!p8>3*wP3_4RO_HAFk#c&C!>Oxd_4A&`AD8*&h0x|ff~ev z?5g-JpE#Kb)BVjQ$(v%RBoWg>HUD~kzUR&Jm!*FBSgQ8UOjlQ$x9#6t!UBWwI$d^R zA#DTCdmAW5hYo~|t)@0B&BHClbvNZKK<#R7WGKgz76T<=jg6(Jq`uNtF(+{H8LT}6 zFtpWHeX`+UXV9sWL5mUhwk4RVeFG_`$qhg$cJ-_#F7}=uouZH3QH(HO6iHof*S%ag zGg3S}>n*5c1WoG)*#ID+Pjbj?&ubus;;TM$`zCYozD=X$ecLlO8mwxsZ#DK62?-jw8c?W;HYPqsM=+2V@SMGO0a3pQDm zHlbGn7Z-6AAj9x}JN$&vBBqyk`tg0rC17pIlNa*`qj{L)10_|{FCE3%-D5_0i*MMz z;*vu@fwRwzH0nJ&dm9zz^bAn5uCeiELK_^j#FDto 
zc5J38RpP6GGDNN{aPtwo(mWF=Z=b~=#MQrIYpP4hs_n=en?{tOWN{cro8Lq}d0zA$ z6kUTVn@bhHFQVx8eqXVUz_CbGg>{7+d%e>}!0C~`f9hq$(FMALm%m|v%3wkSebwl7 z0fW2Vpm=TmK85n{HX+ z)EU;SZiVyo(Q0>2?Z;Je`qeT8AYAb0M~UU5m*>7;O=`M|^4uJxf;N0&6P>|}*-n(J zF@bw_eKMl^f#cV~pdZ^(sCI)BzA_KHK5F|1fcu=N=M-k6V z^@0P^HwwV=lVY;;(gt^df<*>rxP$f!jV^w{?LbcoGs0M2C;2mR(Zn_%WK8oIzeKkU zV8#sx!gvt6IQ1m%EJOw&5@+f|n;&{WulX(^C8b<9nMD5(zLkW)N6OZ8I+?9lW|8;; zS-;M;5D?I=ZB)zv7z+j#V3TWZEx6g~>cng>8P08e3UONt;E5^=YSAN|wuUpHAMsIKfw7g@d*blpxRJzt zk&$?N$?okR^hOkHoA7iv$U}ec_rnpxp|m;jeO&SLl7wtU`*(%}wgF*X=c^a`b^8V; zO!|N^0@ngOEy(%$%t|JezlxW>)opH5`$g`$@z2;`xZZYKPd z!ITl@!b8hd8oZ?i%v$6jGB;XUU-g@mwN1FzT3}!|9#Oy%7FcQ>ppo$R)HCfMFE=MS` z4TgQq=>|9IFssBB(|F!Q0nF2r{c`{e8Mu`j_*=CR7?S&`#bFyFTa%(@lw{mzq#qdS zzS5uby7Sq5UQBnDYN?pkMM_8?fj@HaE=wjW%o{ivYkV~a^2F*y;0wh_Nu36d^8Dws@%6#7FwZ4lclH1pVYgZ* z)(bR{%kdf%l?xb>g5!vjX4=BgYsDua9eEufaUKts*G|rDs&`S;vmW`{ zv+FjABF&IdDSNG8a`aEMjK6^R2t>NfHo{=GH1v3LYk9!+WUEW)=i6+!EVjt`N~(MI z^)I4ynn6}Znp2qur%WM^&pcAfQ*cVAi`PZ90Z8~UV9!m$A7@~D#b}QuUBpd{M?rT4 zdV6&Vl%bmpov|&52^beY`Wm?VRW|C0B*BjP%CGrA?m^vq3iH(wZPS6$I)q1+V9^P~ zWzF+R;h>7NxestcA6ls1?}td_y*|j+(74@PSpUl1w2l`wZ}(Y5CTW8AF~iGpznm-D zk});WAD%PrW(IUZTAE~!!r3e?Z*D5ZS#SsD=rX4@{$lh?7nPaOY4NCSblF2AojNobG2= zkr_@0{9CuBFHS3U`NUZ(R&!r3KCN-+Hu3QCo0!ZtHns5bx2P%HLuT&aGo{+bb;}~# zwSS4=bhTu%%U*DTf`;oERCn=nFmp@ZrN4HtoRP10+DeH(KqG2&yx}Y|gg`8bXYOq7 zos062pDqFcblc>&U%f@B(2VkUCa_V%7L9CQx2N+di3aqePftEi@l(2iXcbi2H9uwp4^ELI0Yn^m^^c10hu2bsgdZMn7r5%xl;XLu~tpp05U-YiHrd$v%(es@}3gqi$&KwQjX}J9S+35&zeKm5XboO7Jo_KzU=~B)K+FMOae_vS- zZI=wOY6!r|;Kt$2lJOROa#E~YmdRcCR@_Qgq52nF*X;+>iPx28NESBp=X=CS?+lPI ziv;a=Ub3L(he&pRiLT~X$Y1sm?+VZE`0};f1WgGmy(^43)PRelwkOKcQ}*aP?k<&p z7KHXWva)}0ucRC~J{SRx!!q^{qDJI61#rB-K>umht`e zI_wGPZ5x~;oB`4o^Pyo9c)4fs2JYXOW4}8$lnL8!$YIyM(sl3jtu+jx;alxVI0 zjC(PRFx4@?V_hQ8vn49Y%!J{ z=CwO+*t=u&Gj;@S^SksZTTQ992hLKR?=@d5r7H@}GUcILT!v|tV462vSbrBUDI(nA zkFq`Wv)!4ht4n+8b5k;+lM~XQ8blfvqUXv|UZ#f!m1bj=o77#_?7Aq(MlCDkU~ILQ zWZ^)0MLY^3LRy81;)LJ5K^eRNmZ6L!O%()-R`K#FnwQ$%Y|T`c@wD~O&F$?Aqbg^o 
zwRxbp<`cGq#O)cDz9DeDxR46h2re5wWj){{lMRKiSF0oj+?aBEHv#Xb#2n~B@W;n` zSaX=Xi3QaZz?g*fo~WlQ;?ZFgJOo`YU{-vE=Zou89+M2aqbMa#X&s$AJq*Vl2yItC z=~!loe&e3BBHj>uqj)RiQIEa`OZys)8*iH@$stK`YC*uqf}0F{JsCgtYLf^Wk;x^A zW~u{fIBb@Rhxov&#u-RcHm%D@^gAu^+xu?-#6CnvBv;6YHQRH^OO-~mCgEkcCP*IA z?)C@=WQ9>SNN7%K_$OT=Y`wfLST-((`-xnlM*UhJJ%%BJwa)=8E`&0UV^WQtd97w` z@%31ZNmQl&Mdq>!zd8i6T^P|wjeos#>^-)1qca<*5-x^&kXGY;*00MZIO%$eboIpB zMT$e=OwlP93L^`ApLq4!8ivErcAF%aZQJ9q<58a~XJ?7cP(1g|Y#*+~% zJtt9$c+cAJ!Y%Ln!yL@_Jc^`Nuo!zhqlJH50=to*)8hQbhHQ(HLE0^ZoGD;FpIWy? zA$0cFlH+_C707H-IeXn5Fu)st0nYWD;WFm)eM6(&RA983s~joglq=1(gHsL`oq4M- z=jtjF75BR3U|%7rZ@NnzE#_)!r-o>=04cyWbHBD%?EriK0?e|rcPk-8zNX0dR_o-)H7e}!a&QPEau z7y+Mp=Iz$<)6nX-Ju6fz$<@AIbhmg;_Wb5xaaFh_e(j50VmZgM_piPu;Q8Tx?yyvB0`>eMYgshja=H# zm`k{II9k1Task`yZ*{B8Rj|L?GNcqvMxNj6Az;NAdtuk=8<1}^?Z4O4qXl}$^z5?& z$tnM2kJ}R(%td^{WkqD=fxk1; z%x3_)mzah4nv}^ON;9?tklkK}kLy+!dth|w7Znx}oQk+U zY~%HiOuwC4puRGpq~Wui#XMkFy$@YlwG|ZuL~TES`m)|w+=pJ3Rt_pbAY{3y>`_cl zl7*%qA*jS0Gn7K8Yj$?zwm3x9DP#~T^}W!9Kru2k%T*Us0V1=EpT@?|AF{k|f_!TX zX`kTM4|kDiZM=QDY=1b%+oksiD~}!jhGq2E?bmwnw|Q${vRAb%C4FfwY4cG;?5fbF z5?`j&I~#1r2j%XWZqm5vAqEXA?VWej;^{HCF-Gi+xP5t(A(nF$ou14fDmu<6?x2|3 zaUCG!yOMk3(R%x{@2MbPt7RiuZ(vyQ;He6Hd&5=K;9!L1(ke~Ehf>wz{zR^oRj5H-C zi1{zxX7C$z)+a}L7dz|HWrkx0m|6kM^@I}5C!x%<9|e+wqis5beq4CX4I1ZN0gGL6 z?Ivb#ZJMx@1v1{54GGSmUUIk*6E!*=Qd$|uY|W*3d$wpdD9A#0TB*k42(sREefm_7 z1ZR78yOxb{m$c{h`BoIl5cP(-WDWj$A zJ1!Opke;_9_MI#mWB|sj-Gz(sS(ww6lb8H3FX9=h>XiOu(q7pZYQt;2;AmA$@KEE@Kqa5FIiMv zzpB~_t!(|FNI2)fUy zvP6qM2Sw8rxGPX0etyUOOXzxf;VIE@eGVw5#uVO1Cez&W{*)(dpG=B)TO}t2*}=O? 
z0UaaX%As<>bA&*n+$J4_KpN@mfcS_lqtn%DR{2n2mzHkhU{1ojPc0}#)7~VsQUG1j z3fh+Wxhh>)NtvNW?So7(<|mJgpWggAl`g~JpT~|{xZGLVSiSqdH^!3Nt3QRlrjs3B zy1F|-C8QK^*+aqfbPEgTgvWY6IKHSDAFHe6FIdN@L3G<}nA;ylEGb;7HXw-+8m;w8f$ZV5f_`}c2QS%<@u|ZUf|BwUAz#Q};}Nzi<{=XK^j6)MPfvNV!hPzE z=>)nMO@P_Q;eAJmP$}P{(%;oCLd1|xpS6aTYhVwmtL7<{?8GoAfJ6~)abtOL=$=6vF45!E3_tW2!f;+d4fbT);Bvja_;tpxU$42}%h&FSdRCK#Nf{!DU9oYdfF0cW zrZs}vjbQ4d7<|Zv^;jSy4ZRa&4f0hSGG*X7!3CKdEm9n3Xdz8dd9kpcm{leee0ZXu z`qX~C-jJq`8TUQ)VG8OTGV;1Bn}+vwy?X8GVdXGj60tHNLe9;7%ggXGt7JfFslZVo zm`{-DgNkUyCYWePCi1#3`?eLdL000sSSzrg?biBv@Y%a@N7{YrZY2XFMLNb+g9 z;RJ8)>ON~rNd=v8;L&KBGqm0vaf5h)nz*8t!z*~De3=QiuWk#Zp>{3DduQ>0p*<2s z^K5Bz{UT`1ZGq^4%d3!hwH&SP!iRexckGI|fC}X4=}FMz1|uK)Ui6@Zln##9i>v4v zy?-dvSt5j0#EE45ZrUC8yuS?Jzjq7txZfkYrL;>Mw48z`_Hw_Id&6#}vl`fRL!@9O zAZHP<9i!4CDK%LuXSOz8KEaO3s@1M+QoLX6}rl#~p%wr6&$FYjM}lLC*R+98kU=*=Q>vZRzA8Ws^PC zu}^aLcn00qP3s&x2@=PLRg@4-jXZoBQI25`? zSgUI!FLB!Ssnk_V*WcT1S>yim5e}xdkhGS7`f}epA|h*awYIOM@;-eH5sOVB$`EWK-CZQjLbk)~!_s#Tr`;7rpEHY*s%WK_^OP}JjCOHP2Eq6w1A7^f- z83&nU&N|Qm^D;J&oo_hL6>HV|_`Gf`J7dEAbLi<{#pZB8q#=|at~Sp~Bh<$TCb|qwq~1F6DP&%tz)b^_g}-imi)|Q{F>#^9^r*$W_R=i{Ef{Jr^t zG?GV7bq%hNURj?sc-q+oYhWTKwPF&Y>PmB6}+Hk^o_bXK} z7!I(G$4+Xa#a=$!YaoaOHNksf6#8;_lVj<3a#DzTt0+dVi(ebE5s`ZF3LkyBRjv{- zX(gp^GEyNrt^ZxES56cnGuK97E{FVTm+=TLR(zT-H`&%Vg7=vT3Wh;6bz=X0y+&!Yzdo8rPB2MNmKLT)?#A(WDgQL6l^T|jRbPOy>bK& zr>2@>=m~-4jk6b8@Pw%!E7j!u#px(#j%Ski2QFno6pg2*- z1PW!Hb(3BoWJN+7>RUoapn-$ySt-SCQ?8z`mN=0nEW0qaf9N0}GwC?WP+C3A3XD|Iwa zUt^_ZvA|b}-D0 z)X-gho67h$|GV+m_WzHoxBiOy?c%*@aOehUq@=q$rH2mblrHJ+l#rGNY3c56>F)0C zhUeq=KIi=K`~$kyux8KR?|5D5T&1hb%}!_9MX$2##WMN6MWDqq@N(>Bbns2uL}eD3 zmwvJKW1{Hez1hEP42k)nyf@lrSxkkwVZ*?&?=Noqw{JfEMao%a=fzduz;vZA;4VGFD{nG_o10Q|F$`>t?$32^$D>)b*?r#} z7!cWMWY;-f^mH`-+Bj5!Umfq3BbXKb(?x`8P9xG|}nX=)AB<(5j7Yv7mtagXXnXAc}mk z&$C&#?fL+HhhNrO?fwNQ#3QAgx}r^$w|&VM8>1IFHv3OMlmQYcp`U1w9W+fyI9LvD#Ly6uz(7F8XBf562js`sWv=$8i2(U3aoMv|GVXtl=5`PSF}-%tV+ugu}Z 
zx(TS2?Huks3t%!KOrUH-`5pRIK(2|$5UK&ctig@aD}Tz+?uy~j&+rE{umn(D;oU5b9cb}9>B2+_&aQQocg*qRvGz!T%+ zF-c7NdU zdYlOZJK3=FO!s60*hZr$bXO+6+e8q;wWV;TM?lvyY%97Pn~B0V7WM$+T=M_MqQCwZ zi+0Qx^yX8v6%e-y9p$k3QXN%Xq?`;VYN5z*6IJ|pn%T0JEn1YcdHgS4RLx+8p$ZZ( zWt}LT)9USjO0?C>5M)!j(e4?$$KtcooI)xtx{Z3ndaN1D1vRZLL2E0IRj-HJ++lEF z;Gm^3_h0>nJx!r3iF`Hnin~h;;=1ixHm{f(TeLd7Tg)PACV3@%Z4*Ym^D%+?;}a%0 zNU&3yZ7}#VKd(PE)h^EYv|aLOCU z5iKbX195ku%YkMTxezd~b_}orU}S=bMu;v8p=obVNdh zo^+LM(uEq{;Bv+79ShyTl)`cgx9Gr={K1#F-{w)UpkKFU_t&jWSr8a7^n=J3P8KdP z(!V(B;P9Z|-0UA2ZAcoT^j2!R=x8FD^JejHtnYL-oed95@8KQ^SLHpOe5+i`kMYtO z>HgMfv!rCR?w4z28}vfLq8~k61d#MS^sLndRXgTTVQYQa6;;5;r~|S_NzNJ8curUV ztN1V_aDLk=d!uo!=i7cWN!_$;8V|I*CP#c5u1~*WG9dp(pK?UGlr_vU(0XOj=+tx9 z;V&d^QG=daRv$6VEjAw(;qpaqxiw=R-Q_G->aY=~b$ZI)FychIaP8K>8HS%=AVRNC zv})k$iVcGDkE00Xsqw~POUK2_0zNJ+5CD2{R!=gcN?_G?FOIO6-=hwT#xlFZHr21n zuDeT>h@cOLor9YwrM?5=+^qGsbi;BM(-E|LL!ag{D6=JLdJC=@awuE13chB+C{7}R@B_4w{@Yn_fTr;SB z+;SIC3s8hq1b$*c^@*Dj?k#z>z_%=Z9AR=$9=T=k`cXL}7M8xVhhmE#%{Ga^3o;A| zbIdHltu?*%J1ky&;?GK^1Z2oE($;ODYRIU8TV9J4CTz2RI9&+GELx_cPVeu5?e`@G z1gf`QR7O1ga~9*w^O8k35Hzfk&g~$s#%jHRfCY2VnHZ{*`nxZvjt~XEgFujs0h;_y z)y)kWvU>v{x2a@my6iSz^Zv>I7c9!W^lOPtKw>0Ipy__}SmK8S4cm0L$qm^*IRk%f0zD8U(hhChOiHiT z?Ef|_GF8S-a$AU2Q&WSTk#WdJp5FWLB3V~o=rg|n2nn7+Q0~dzcAyzDcVE<;wGlH;WIfw`k?+Y{>xBAw`v(Y1&h)7L4JqdvY zKGygA@u6qv@}%=JKtiB2j|VKs1M75Jk`bBzyHx!%AU<`gT|{%P@O!5+lJJU!D^uho zE9y_&ZsQ)DQNE(~az|1bwk0UVtBpe7n9(Q1Z&5@^DW1Pe z?z@n11wkvM3J`I8Tnx@_$*Jx;=KN}#+8Sll7b8EJE0q_@bv79;|BT!>g*&Kl8|uv5 z8Gw8LDlQ9uaN)b(*F&Q;T!Hiq*c4qvBwl@ApEh;_3oRVhrQv){%nU)JD@*x4DN^XJUWwg`_{ag4MGV8LgcOeTtim+5L>Ew*DD_Y%^fa%q z8W_=J!f+rI_bHV)()@`QX=il)=_%(r zC^l*3*WW{lOJ%Pipd#_VDh)QE+C7_Dn`{n`Tb9Hl(CIk2S*cXw-B;}*9Xy^c32ahp z%qV%^u92prg#>Zk!wF=R#>agSY*P2B$2=u{ga5bWSvEV$pr?SGD(yi z65;5HnQK1cxUNKn9qX-_^X%rcmwM#hW}#K5o!#+%CJ>yCtY-`nvg zzD%b{d1o-b^Jpi7rWeBxkGUyI+B7J;2Hv(2DoW&YrM}Q>KNAVOH%tzJL%Lil>rmQ? 
z!iXONR+ha_G!lq*g$rS}#kJxzv?w5;NaLYFMkU15d)KE@3k$-@to2>af zkxVKp#7K{&n7m4R5`+Yw^U=90nwbU7y^5fTJ4R?1Vp()g==Mb_u`aaUvHS?N;n+-wU2cKItiX0)h!a99%wC; ztM*z^i&RtGkVVZt30cbVs50NR5ti1fkTe}<#k`b z*7)utZ?$76O>&2abjeg*j^@3|)?*KKt>qxS8247m>PMA!%f7mvRte~Z&_(`BI>usVEM@vLIbf0hs*{LXGZO8jvlsamLJeQ&_y5Kdz zN>X*w-_kPQLBT=PgHoUwh3$Db(K@t?N`0mE{ygq`Iba@PlI!z#1QJ7M%7Ojpw9S#{wGWsB6+I>@pNF0mLmG!KWsNv>taiASGFf+wVUwjQLs zJ4UMe8AmO`-)Fg>KEK7hJp4uGkB$c}Ry>K7ar7aTarE9zDxxt8UTvJc3ozB?I=&~ zA}pKfZyq8(a8!|!Scb&*CFVhAaHMwoe|eC&?G7>ELmgMlkgGZoPv3@jYLI8Ex6O+y zo&VO-^55TMzs7xaYq9#GhfjC!aB6m=!YnHINWUWb(8mscM7`S1H%B<-lNznIP-lAa zPdfW~_^Wour$D~|gg`h5IZins{tVb6(W9M(aE-w&99axc>vF{Zfkcq?A*{$k!bfX z_P6ja_D567I+U%n5N+F@3|M*)qkvm5m+A1LW$B|TFc0{EA-ePT6E3O;sF{Hqcw;m-^l9U9a=1kKgKy zi`8p4k58CSGQ=NTxdOf$jr#<@X-tS`AYpDS>aGKnL0~lLV$j`W`4@d=@G~89K+R28 zUC@GVBAONpp%Ac26@*)O!{b0Z1y?_5IHz+LRi!eEZVw1p48)&Xn0ag2V0M>muOX>C z$7-wVO6f~v?ZPFxo0Gr|-HUk|Zn}H_zYN^yzYN@DdDwlO`M_U}THh+ud@8%t``Dy7dIz6@Yp&wy?&D z_+=50ii3HeZ+ymsmWDFD_L&pbRW2L4*U6+J>s6%Y1xh@w zd7D>xivDw7b4_mn8REec*|ImCakyQv*ZC8Qt5V_)uN{H0+q#nL+wROb5=Oek;M-oy zAJt+-Xh&F#-speI(RN+`ttNb9g04nN0f^^Mz6A-;g&CFDd>bHm4u9c7b0uW%SVTY% zzylep!Erb=wgBQbVPd7J7FR>l&2`J4RKTqN?#2mJrQ9PjBw;C!9uptY?uDYakU--9XGH z?QXfZn#)ii;VFNl(CG4F|ISFzAQ545fvyguiE;%m!~~9Avt`P!b#PmM znjvQe9kCw^*fL|6YDQFiO!94g2-H@;D8yCFvU>cdeB*z`AsAlx8vZSNBPTMsXp@^L zU8+TP|F3ksb{%stOjX-x?P2ZdZ{&nO+N47A-zEtcU2NzuzgzLyge>?NkI_fmkm%y; zS-WK+$fjj4aD5w6E`t*e^Ft?EwCi|Rcx-XOl~BJwlmMi0?Bg*E%4g*4uc#yuMq}Nd z#}}*OL&_&4)&+x5#f=k!21L3xHVnFhQmhfG;3$c-hUC_wYY8R)CVp_mw%?Ahuxu;; z1DzmRWROBj4$U50}8nfmbAiq4uwXuq_NZxwYlcq$>*Eb zT^s-J8bEXTr$=usMtbr!0QeRq+B=Q_zL3fS2oAJrVwo}sev^y8fHn8AfBw%8KO%?s zAR{I&sqAdf_N8~_4GaxU==OKh6>?wkocd8rW6~n}kzrkkc8Haf#jZej3C^X=c;quf zh+D`dDOIO?t__6)!=O8fFnv!d=w~zg#FzBFaUDHe)fweNg;9;3+8yRnBv8HW>e2-P zS1De}Pa@v$6P(4dxcy9c1NPg@(O+CGXx@Qq%_YpC>Vx>_KacbuYljlpQub4_Bs`G|(LaU(|jC?NgT*_UW6`a#9V z&Yrzs?{=tz(p)CXi^R$T7@k0CBkW&RA&N$$+jIHy?Di{&eqKFYxC#!t*t%4Ei5Clg 
z{zp1f{jHvmfBP3SsXwLRq4VccI|a}ZE8Sqy(q18aH>*{3p5L|~By#268B`7$^ohMDfW2f7*h^-;%?}1P zn-%$NKT5~t0cr-{%+-J0(pB!b^>31>GOP4|V zFH1l!A)cCJc8F=3VtTG7VN_9Tcn^JfC#XWt>OZ;0Sjgd?(Eag8VBleY-1U-WI>m>B zP+0Frs_Fb$ebJV_LjjrHxVqy*h=9pMO4rmx8n!hMJMGJ z8Haf4T#~&z2m8J-i1xG8YRLqP7T&O2U-W;YDUwfN!o~D2WZm+X_1{g)M#CmSeF#{l zjAr507wyRDh%{1SN=4H*mZ9Kyb8#M;d+&IbwtBq+98a!tL*F;a6O*40hn*}RCq!{c z5P?kW70&EBrMwG^$-iiVC+Eq;)jy7fQ$4{zLP)o@>GhN|yYq!+T-()x`V;vXuFB{T8>@uo-B&4ZsKt{<%8nQ8#iaR7#@;IWll?3ED4a-RAFOD8|jYnGkfHTc*a2ik!8(08VZ$L1)K{qs?Rp!*U|qe^Ai( zjo)OmPoRi{uwjvF|A|@&-s#dJP8VZXC z9TisDnL^)d!i)OjGYLICh@2IOinuwpIey&Cg-q=<3C2}Znwb1;21H#E;@g>LxwH$0 zgsPQ6-GoLMz^l#{a*V!jkNZoUT1{xA0~0x7bzU0p;RIaebH0eAt%m&5kj$XIzr7RLf#0A41RP`hALm^o9nhAYwr zHL32)TO$UG&nE~%qEx<|&{O|_OzRGi10G|iKfAfnJmpp(In#?~>Jm#p@`IFW6{Nxy$Bj`n$ojebJ z&bpr&HFHz#67}ZXwD=z2P`FXr*ZYq5DZj1J-hv3C*J6WHR-@Hfm>H5cL&K*Xv_Z4y z%vJXUb+|E%G+qWgQM)%c`I_YzGokH=%F${*vt-+wjj+<9IQfegv#)Fy)qt!pe1Bw}n(!ox*T{&z}s zQ4$SI=astFl_mf-e+hqfcpzQB!LP^$izb*tu9T_7b~)T9vHUz)my{?TT9q{*r0*8! 
z1&PJ)yG4I%z2QbbK0d!zEfe@_#!NUFoVH$d(LHG-j+n?~&fokV`zxW=+b{Ix4sx|P zSD&LPtmk{7Hp9XGb#fhR@CZ(!Py8*9N)3@3v*Xp0dje-9vKu?y_%%6*hWkGO+3=HH zQvHcqDODY#{#dMeJ*(Y5F4fk=t(@vO%^a(1QvKc4jD5QoN3>;Go+I+K%#m2dDDPz1A?Z}p@+iw zk7m_D{|^G~how-tDfIX~zeW%a{a|D&qp7{DdT!4n1O;@EW{sJ==z_2mClGlLXh!5B z6q3f6@A%c@14yMsJ7hD24Zyt+3WERJ5bPR0#2d64tQ`4=okr2I#1CGh)nR`DVLT+Z z4MFiQ^01oUhgB90NOZkI{v5he$Yy9z?kImk4V`7eQPAgZmhA>;C*ZCE8*6&y5pBE* zT?I93qMj=a39>DeOmV>6JpkPmZ<8X*2M0Ak6es>Khknq7*D6#X(yr>h+bW*xncwvs3Wb5)A5D{T zi}8az35sCrmnR&yTCAqr)a}x}W~W&IxFMnk-&1O;=tN5f!t>~`H`^fgccO=KvUX8T z{vP=aQ#^g<^4zR^boqaK(cZtk=$ppQbi@8(n^djYgHAy6dJC7Ya_FOf0)-7I6dCcQ z{RhHkT8oiV$z)5hy3cvHe63ZzJP#tizvn)9XPkVwOcoZF?)ycasPpg=Lxt@En=f?6 zD!QoRnE-_bY1#Ji{bWcLqBsYb-^=42ebjdUArGZ0Fe3;^F*rm;F0QS>@4I}I24tDZ zF?QXzKlIklT%qQ#Bl!gEHHm_dzx@Mz+n<5a!Ziq-T!5;Y_j&gDYWg?GTT8Ruh^M#t z7k9M*9nsfj*%3+2_Wl}=A_>CsbsqP`xoU9riZ(sIIM7w!E6qs)%=z3qxbTZ zfWkU)d|Q)z6B7J|X}+d?y7IvBVDo?Zfav`uKsMfJ)mUKKEPL%ciAt&^QG_5d{On|& z{gM8!yW3wNz9m4@STg^;*>mrg3%<(k*ul*&L4)ldrWxPWhMthHHDnNCGW0nQ0zZ!aoW&_cI3>dd1~9VWE`8r9E6 z9+mXYOScCRxqd_P4{*X4bS{&Px998Y5ZRYr?W$tILu7wVZUqyPA$i(6S`%h>cQ}Vk z)ALP%ucizxlOFp?uEG(NYLWV=;4P9pBuMNVs^|~HpVjQoIY1#$K_$P%eiMp}9`D7B zX{LhLqQUiCPu?FDS;fCK^S^!Wk^rbCV+rzRP;#Iur zvHxvD^V=W5VC3C1xepp4)3f#NMo2J~n0^AHHN0$51l+YDRSJvM^~vOZIaf84S^^}V zEye|>SO1p<(9!{k-}yIFpylr5=Q2IC$ymb33M4Erv=#p`Nm27f()oATmY`tGwKnI$ zDP<5z!5@hr^k2q%!nPk#xTDkZT=ZL96^kx^+75N3gzes3cx`WyjkQ?>_*gb-jTlzC{{5Dl+idBXVw zJ}@*Idq3Y{m&D+;0F<^7+x3A3?2fM~Y+;iboSLQ#KsvRO{{m?8`v;JWOs_emH!$g| zfcx%pyYkaC3Ne0&1=6>6PPd?1q<5TdlkcdV&n8@{R(m6T-@VY%p85J7=$WPkP2*l* z_fVdb)Un#Gh)HmV`#4H{pF6{I+YuO!?1HgeP>~xb>x=aj+GWfjBCKCXJ@N;A740|9 zj71;u5z(+sK>})w83enGw&69Uou7M$l-^VygsZ>3MkH|R6o3xzz0zWYNu4M z8}Ivxc-LPf3#i^oX;n8nbI>*%o_*& zW8CNQ2Tm>C=jzD%BGD!*Y|LY`52YO`?tLv;WG_)~h9w*ErAOP>UbiyJIR)5{d&~>~ zT4p*h;CbIVH@@^P;&|SDS_%L|oCo$8#mnkIfqhS%{px`mm&X ztlA+!Tc7H$smB^GdZ|86)pNDEze+lNJ;gyjSnlR@q1jCSK=a7O=}WxsngCaKM5e=M zd8s;_H2*e0&o}q-W&0TgbJyJNtIUu}iy(12pXJB0%of#?j>j#*yU8_k;E1!U 
zp26PYOkWjK(hXz0C>H6Wq&Knm#o?hppI1U&3jLRe`M!*>>!>yRv=A2dndl)A|^axQmwE1gP6;EInx+V$WnYH@9w?k?| zQMdu6kp{7|?#q| zK)}?PjudnmrWEb%eR;iHs4|8-xPG|YP5@9pHX1>W&^ukH#Lt@)vPn@DmYB0tMwGgs-z z@V|l{sTZ+5?$7Q?2!CkSt#PCs`BYX36_CVIW-%K3@a2AJYYb4>#-9`YW)k#;lg$ea zLQ}dubSw39>B;aoBzAxNF(8YDJ%Ml;<@|gN@a#4|4rRyI`@iM|@K`SM^T2Hle6Q_I z38Rw`b#D@ZzK3g$K6;B?2ChgiIlyE?QWg8&LtULqF$ciq%z!|*`^roV{*Jj|Kg|OI z)HVa&69$Cftubm84}g7_g1BxBhR3>5eV%3c0!U(wxQ2hmk!3P;$YS;{^O3m8&&2?% zQlH566KV*HfS^I#xobVc*!wLzO_*TNMf*#W0n-cf@Fp>QEPlv6VU*fx9e{jQI$LS# zv!&e_<;qqZLCgKqJUXH&`#+aX{5vXuiWBb;%_{I_(?-r|Nw&3whZml=FhuK%dX_lo zuk46~yT_B7kJd$7=7ob--eTwc@(y@$>;?62Ts3kMj&b@1d@Usbksk8D>7>TOn&E8I zBqs%9$beW&IZ=mj*XsBDAXWw|@MTD%ysL^>dvmUEkve;;47JDYOMa5Br?Bj>)*WBn zmm&b|g0zb<-T^Auf7mbOV$T$*Z$+gXNr_k*BR0{V(=*cq3g#op zB3;wRs8j8S$B)Iz@jPw#nu%ZjT=)LS!~JI5>D4a7Ayu1G;gpYkTu#MV$!lBPZ17KO zlALZ#cU_{GheB*{aCRULT!}ihBW7a@9=3KSQV8s|=a?XfLASE?%N+03jCtK9FSZ zHb3RkC^hqSNs#Su?6WsY#bYkyO<%Hu{K%DDX@|WcUh>+K{!WeX;3Bpzb>yEb0fI`; zFN{^hNx)X@;{m?7I#a22o-uNGjNmMlbI1E)YxWRp=u@+~#q*O}3{)Z{jl1o3yW{#R zKZ@sNz!h|#64=m(D=gx}SWf>6QZM+YuS=YMes6o*MxGru#svNpIAJp3v~_c(o=DgQ#iot>8{`go^IM|$mkPk|>DMh84 zlN`T@bL3d6h8eiIHH(_bQ0<{1ieWxrT#yG1sR+xn()bDW4;x09f+6^z)OlOsMnk>!J7I!QOv zaJ-e@iDWdPGy+q98Sr#^t>?XMdr1bt{U-?cqVl2v(ZMREZ~=rO2vPT^>GmtyQhy8-_ZA`7-P;_5drx4v->qX@es7 zWCRAk!()lN^Sm(6y#k%+I@yN=f&?6NVyEZ3Q~t;qw3ujNkrRqgl78MNCaN=2Zitd( z2tkN<2NlS908Q}~+I`mw_4d<4I)Nw!^h(ea7S|0m4PjThu4{WtUG2Z(veUo0xFwaz z@6IZM4CV}q4sygKMO%cJ+irrKWnHxQO%$uiz`TcY8Q@ z2*};_5?=KnUgm!Phc6J@MEvWQas2?fsj{W=!Y?4xvZJs+7zpK08_4=N0vqt`nS~dT zF;7t;)L_D?SYSZO6_-#wf5?`5IpZoUYz`r!vyv^iKoz@L4g1`7wHf`a@P*6&_y^iJ zzcuSo^*sUPw=mRHgNFmeZK=fzBc!@hfYcJlKxg^sn)CTrg=(>Kh>u+`5zbjP#(ztB z+`pVdB;%Zv_0i?6r`DIX;H@x(9Bo%&x(IZstn|0zQ@$l$XIP^3N{xgOb#kUzL~FXa zI8A)+__4E@tm1#l9dDqv((l1I@urT{qoML9tHTM$*Ks zN!H4VKMNE;4`*jOytN(f8ZT2IbIzbzDyBvrl0cB2?;VcR8ciTUej`(aBdj`$2!@>k zy4D)}vO54Ysv}bR2mHh}d`w@s`r%aU)%?V}PdG9YiO{sDm%fmLbV4axCFL&7HX$cA zU!^!OU)R@@0c`jE=(Ax8*f`VTUzvm+EclY 
z))Q+6Bex&2rS3m+bqS>WXUo3_MnXYck}1Zv;4r$CKN$M6NkS<4avJv;IPPHIC#o!~ zc(1DXUN%BS)ozS-Bxv{Jx}=Jjwpu_HxQ=>$Tw7A~(XL8H8C#}Tz`IY1kPBB0fz9D< z@?7F=Zk@jCq5!p&5buxN!)?|wQ?%A~3>JErZ5uA}IhPGuGS|^j%2;{1g#Tcqq6c5} z1O_zJv4qNRKqlvBbMAd%ET34nP6tx#nok+tQGI15&%Y9C9Ld^C7$6~SNm|modNd>i z4JPF z+b~22Y4dmOEDdX}A_BFm3PueHYE+^^2HE*={&yGo&o)UnMz5!7Ld70PP(M?q;az3Fxv`PsuuqIxq&y% z^_5CCA-3IsnUIU)#U8R>d-P~w4Q$4Wc*++Y{IEM$;>B~MTS^6d8UYzgIc@yi!P$kZlx!J`GL*$|E;pHm`8d3K-7t41(H# z6)hHCb-mG$Pncax!XBXI=coo{2O;=DUf|DQ=bZFz3e4n-n)`hook4Qoe~BYJ6=~cC zBKFq>-()-8Q{;M^NMBg6ewYN3nApp&nK1#OQ%C&H@COnfe~;ftzp^t@(xXU(+86qC z!4pU{!4V`+tYhdt5C*i*=Pigy`6h)V5TJTQkwt5}(`Y#qS*D_qxj5$f1MWS4Z$ z&Xs-TfKW|mm(6Z-*Nk!(4oE@T1F_OTW8=SMHqz$M&Fhvg|Ie}Y`G*Mat-&Vc7sj5o zNA10a4KdI2dfzaG^ewGKUf%)6K{MUd# z!QUN(C9aDAO&6H>P3Vew&5yOZ6?xat%;tIqZ_ov1;9WaAdsK)pqWhNsLWHL>9Hbhh zY(m^D<)fhHa7i1jX2dC8)<$3qz|61$WkVpD_AFq7tg8*vBh5P&&!Jw&Ph6v3Su*?G z9aVBTI}IoEae4KknFMBi6r&I!*Nmblv*?6%qlhj=U|zN^^wO=l=2Fp44f9CwG&pCDhTdrA)Y+=SwN*bw|wpw)*5{wIoP5QwUZ%9JCa$@P&+ z-j1g?7Jh<7Z4Bwgm)>ipO|}84B$!6C$;q1*JpJG*F9z$P-VJ2|q+p56OBVH3;MMA6 z(my`2KUuz*d^o(!kosMVQtqw6sv4p!$&T|gz!!4juL&2yQ2G7^ z%2LPT$~@=cGv;(4UB^UOY~%{|QuWyaN&>qTtr)R7qpgq#Uq1uxBS%Qx?}R3{Fz3t# zZtfIYLY{Gr&+6Tc3H1gz5o2fJ4T^7as1++L)}=yqh$@z|>+U{nZlT3O@))Ko@(JHG z0v**^0-#z^^h5nyPqnX)x2!Gv1IGAw)?5Hj1+U@HAz$QKW)qbH&V4mK{cn%--pj+j zikL&mK}wIle&v;H%Ojzxc#*OMJC)>iA3N~)e1@~#3DeahQF=?Nk9M)|-)VLSMt{km zcCad}V^qW2D~XGN=d@FzykIDeh{Yayl8v=cZ~4jN3jAI~W4ffOdhoo372l=l3Mio& z$2U!A+)e+g>-JA!VBc+ZF--VTBB`eJQ2ssK{r}PVIDi2ORNiNYDWx-`&Q_m*ob4(@ zH&veS<*;ede2PejH({9s$a=ZguQljr<4Wka>23i}i;184TaDl+xE*rgT`z1Xor!!l?9pEmjG`&$Hk=lqQ?*{Zh?=4b_v!rnQHF#8l zJO7`gSg=jr2qYKHkE}Y`+5BYVbl=T+47*{DW{TqtaplecNmM`Wgv**)3(_R4(lINw z7gDNG0>YaW%S<^q3D%S{D5r0yBvm9oA9w}GEKPJ577otRQckCfQ5BLJAE5cVX*&m? zw+vx6f=wW;3q>RDN&@X}igqh)QT};qT{5X(bdhqm^!?7R744B76!yVM>j%ao@YP>{-`rtNJs;*VA}25mG==>#^N%l! 
z-{}cPVo!VNcUiReDY~W$4@qi(mJ|--HxKd1Hlv_3F(fgcU)5@xbNP3&u3Z0vp!;C4 znToWJu2tKux3dGUrB3hD-(3Rrrh^aT)LJ&fdY%yM+nI1+L^v9PaWP?a=6}r3ZHwRq zA)o;SsURkzpzgRk5SL6-^S#&x&9cHK^CR@o0Eg^|K@7IX!d?y#H}|v1aV-4kBLoeP zg0%>S4qD0rBC9`Spte<_aR2r@vu%z~kC)6rWp`yaFmBha`k;}T-%`Vp- z3I0llN18g^-`~{tHx*;2vxyY)jwO6W9NwwCT1DhQ&vW5$roN}v>>wTdoMxI}sh%Oj zUh-E|@O8ktrl+-1SQ~ibWgcVwet0S zu?3@%poHrssp!P&jzK&hoRc6RO7pD#g{+#A*5n68O9IE&gf?=eH&NQJ6kk%{TpG?QnZN}W!*x2@c_EYzfO;I9Tj5BDyoqV6?3NGJLlli&_FN^gu0%q40uth)pdWt;7ZcY# z4)PS~E5IKwSy|JBQ9=^Fah?mgi9wbj>& z!J?OM&K|bHcEj0TI^@IfVp!sf93c_X7dR$hP2hp7SZGG|p!mc$2pL*C#MbpDY2bc&;Mp zx%~SLn}^M$QHQta-UG^MvCpGp(ry45D0eF>M01HojKiVC-!6NiSzGOBJ(p+IzzqqL z3Blfa-XYn9F8M0USz0HR>F6CesDW&eKx^zb;hzxF3L5eO5u`dtWPUpc&Z3Qn zYO}K9LY0pQloE{sf!)$HBGh5@daWr-bynuKwnwLiPDG6+!zJa!JcU7y=3Imw&1~e8MvQiy(7l*IL^N zWO9+BNrgAvDWKLq=fJe)iMVyQ^wSE@VvXAr75O(Jsp+Ew6jG6VnM78_+xyOX8)=~} z2`vYht^>yH5oC1XsfJHoMIddkeW}*;edvcd0=NCm)MCt6k@^ri1vEkuH`1ZoI3gwR z4K<+^>15KZ66IYltCBr{z=V;IgZV?0b5R60Tkjth@DL`3Hg>|?)8^+K)w~BIf(070 zglNJ(W{ZU=gL(o57y@r5LzgR7g+PZRf)=<%>rg+|;VsMnZR(Mb%Wpr)op4eB-v7!0p_G0*a*3cy6G#;w&K&7g|R+=dS*cgOq8Z*-`>p$SvnK zT4kJpe?^B?B0g4l;RLI#OYo(%H2PkU$z-pa=Aw|`LrVIg+Y>;^VSP`NSeeZNSQp9l zzE9X?cwFN@FJMfC(N2k(XC72Fy4CX@u49A_NA5MILhjW=R_4Yy(k!}2$A5LnD1mkO zChdn{=LbFKyg&VHjg}Z$6LD@465#>u-O0E*Vt}M0!vg?wF=CI$ZS#gl`}PhB%3^}n zWIQ?Y=(~+VA05T3O=pQ*b}e`>`y1O{c)&aIO|qSDE(a#=^kN?IE{?y*T8nMI%;~XG z`4+7B&vA$MW}=&+3s=+TjNELk&WGozyL^Ig$D(4XyC)J_QsOdm%~@x@xm2cuanck+ zpLE2#)l7=W%7K5_ABUkHWv*Wi3km15`EIU5rm&X%z4_N~uQR4ez1e?Yo$zS`O%K=l z6TPttUvhfHMSKq^b|tpGM8<^(xy5Mral8*~OXh{RmB@U0>{Sw$mXtyoaZg#{olapt3VH}eE{m3l>jZYeG+<_VySo{Za{9V}rRVfC2Z zQ5aITsv3S}HGqAN7Ap7>%S^_vH9L%u+t!@2|Cr^|(6C}EQcm5(*-h=k0Uz^*M`yN9 z;_o)$QdSoWUM2gte(Z$LrRUo7<$_aejPKfJ`;)Cj!$=HJi2_BZ!5{yG1$;w%?qgGb zkD5V2an7?Cn2 z(dTA=qxdOhZD1(&4!o+g*NsGA*9@hTeQZo_q=E^~b6M|13M>C(Du5n;c`N;ET zolmAt5XcwALp)SPMFW}~5Yrxerpmpk55vR5sj4u0iP7#25Uqcg{Dx^0C=x8$!M))F z3pwZ;ut71N3XA%;@cqT6yCr+|IN~`$p11pk=qzH{+lC{@*R`PAhBE+s`$*Ygx<#TfcBZ28NN~y<`AVG{_}M74aJ=J1KQ3?lT*PI 
z^V)V1Zn-B~@7!#uMq1D5YKtp4VztW%Gx{x(^JfcVFB@{>HWxHj+DO7@F(B#35}IpN zj0qb^Wc4Q*+K2Rc6PjTo*cCFf?Ex7h970VR&93UK_Z(J-X~Ef?;;F}m6)Fni6B;8- z3Jo43g>ef}Y~2`jI*U$0CbJGKwK9%aDut9ke*#%r4Kf5K<+h#mzG7B>&?oAHic(+Y zkAc=H40dA^OCYE_p$eURmHs*mA0Fv>0;>?hek$F$mnItLUo#>zD8A@appXy?_VF%X zqyTswE9w`=fbtG9?EKM$%l)zd$IS>5gEeOh?Bl!P<-NWZYbvusyeu@U-{4gOZ2Cq*Z_*eJd|1X817|g%s)1jRF(` zSs%vb@4WXIZ+`GMZvC(G-YEW*Qj*u==(;9)M`IRZqps3~;(Ue0+Ixcc&mqOG$TkH_|22(%lHsDF{d- z-6;*y_^sn}-@o|Y_x_9~6gt?N*bEz4r%WEc(NAP|rWt<4 zCR(VN$6fYJ5GOykgy`GmxBB<{_mqQsJ===OqOxcV=@)pz#w`&l-yGJ5aTl&fFa@j} zY@LX_ap3H}iF-21MXw)ho(yEKKbdq+z775(>5>&F$-|Hs^SJ47IdUb{?eIKtF#;X- zS47YR#~m)?>vXHP`JH6nU==5ETyKWJPF))<#ufkB7~hv9g9u~IJ3xto0Vh4rAp%_> z42{@Iai>QX_C`%S1TZer%LX=t3_d8~#NaDl@crFvR9=*h9jGD}Y=V=tc$|i${3p zQ*=p*rPmDI~4GGz+*cS@@ z6fq>i3oh-9WBr&4zM%dBrB4W152!U}QPvlE;laa~uPqu79iG$9#g_XL^9}7C~kK6X!PC8I9 zk*XlRP;CTwZ{vwFAsSl5Q{ng!yrmbtu6)oUOzFbjJTujXhGiHzLY|3`@CX@Zlvo3 za4<>pbC!iWMzO8`qQ5ZA(Xu(6Zcn4Pr9l_?1Eufu7MN*}Tc_Drj_|PI^sNYvQ=MV+ z6XJz=X5c`i zI`%Aa)4+tjP{ZdPY;vo0)SKtdW@X`uif#jQi%&Edst@KCC3a(jg&h>&8*qF#2gV># z;E*OENk$kMgf4uE;d?csZ2&Wt#+sAyI#&#;+C}6YEJ5InOn|(`nXNyGFy<;u_FFdd z?*v}!q4+{bJnZoZ;!IyJ#sqB@`usSs;Y?Nsge3mX=u*%#u)xMh$Fg6^x}>^2Y8&nc zze$N-FVg7=n{ueN#Jl(l+E`yN4k-(7-EPoWw{)_CF?7Z*O-q8A{6=_uf&zYTPsw`~ z9~op<2Y*fd7O_I7F$z>FH*ZQ1_4hlNDpsvqXv0eZ$-D}XQUf#oH5rC^5^#h(HUyvt z_^ys#c6B$95cLosA^AgZmS!Tzx~>CwD;z|HJkL$f=HVzub2kze$}VtMddT=*!Mb%=yrcx< zZ+{1yKf%GX>5saJbReyP{uaR-;7j$5$yO5xj5}ktZ(qg57PM+Fp87STihCBZi z0-&*ha*zkhQGP$-gkAwk_k##sM3EmN8&30vhm4#r_H-z-o#sq*i(mep@G#3}ov>(3 zKI8;%jdpMG6)aZ4gcbeVdm#6Jzo$W#DVS*)2fGqvc#h#<(l*Angp5#ufO3i5B>y!M zf0tG-6bxP84DDFyC1h#&*qoA|CqxFR8<()TBz+Tm_l#ZPqmiy`WS-unG7vy^sbrU6 z$k%jfE=gdl|Hn0~d+@(EKZQINIFp`d0nb)p&Wy}RDF40=WUp+87#1(vLE$(R$gY{? 
z7Ts^asm_L1X(dcU1M_whE4tL?@J)l&)2eu6CGh#OsdgSBpA%&X zL`jDEMo&Vw6z*nr8UH>QVgdgS?tgwnK*6#YcD$@rkNL?XYHaJ=8za_0@#A7;2lG&1 z)fqE5mB{@6`m<-yDNf6{fv>-O^AO9`sI7BZP9y5P(|(>B83c_a@^|Nl%<(@{{l7nE zAuCCUuVMyE0hwmXP^RXQ|J&bU*g~dbfoUiE3D{ZTrnT(<#|{79UJ8g3P7Ic$Bd-ZO zuyg}4^qyq@`?>#IeS?_4Z?J41%I`7YNWXSUyzK3keE;t#mx8-kJ0B*Jv!d>^hEL?p8C{6 zg(Wd#w$N&cXZR@|>zaO|j0wPn?j-)BOe3n^p3r>wjs{u5hOg(}+4@We)JH-6=5~Wc zEZ_tRpCGX9f6RIlWS;Q|$BbYpAeu}G&B#j#4gEEc2--qjC)Ad>FbpMc(dH0CEP9hc z$Z%u+hWvTft|DS?zawrotE0^W2GI8Izu$t1HQBuQ@sC>OBS6u}OUG5V6eoxWoCFT7 z4Ojlx+{B1^V1RAX!(r<+L7)+2$E=Za`~uXNs3k#WUihfK671b_*CDpVmLVqPzS*p` zX5(^kVY`Z-F+WuP_U!0O6{Zv>1d2+yK5vy18B}YgN`m?y{Sb?R>?j1>Oz4ajz*ry> zGO$4WkAGH`J^*5@p1E|G>OpKq3r}Y`{5Vld6^E>pU~kh0y02AMpKOm9oLf3Lg$(uV zDYRYVccaU{uiuTG`Z%^n>dlA)n>GBh>f||4e0z#xvVWCQtdEL58}94_L|H{(JRbg) zsem>xy4<#--u7r=h_ZU~#Li-#!P_A2o+Y$7v}n{DzRcWJ99|=-)ZV_u zs!UbP@VoGlf3Z|>RR*T@^+=&oX9it5N6UrQzg7=Q7LiSU7*kJ=4<2*Pn+!g7Skm}k z@iaz+$aXxtC!KmA7;r(4lH-K@N>?ljXJ)XeZhhq@0u)IpHtfyMHCSerk&DpX0Bj&% zeB#j~9P06q3$PzTzWI0JK4U;6hJ|kG(V+*RDFiS17X0+g>s$x8zS2yO9=4e)Pj zEwo6m{}G_?*YIFHVCI-p zBYZ^XXA=hJnYE2{+xoF4CeXn)^NzOpGI2LrmkE>jaqSk1*v(0iHG^&sPfsjHW^1Hl99zvMM001P=lH?y+WH(DAZKhdva$Jl}VxGXRy8`M`y ziZcrw)>IZu+0oBfXS33W`nVXR?J9zA3%y1!s0-uc#twmdDsYsl(Qck^q(DACXZEj< zfIW+f9>K_QB3%mTXF3-ndepzu8}S12EY6~>z<=#!zc{Za3@=*D!z#wlS)0Qf@c`Ka zSSUXph|HY(tn;tILS?h6{Me`ilLEN_D+-aOe}2 zu*nV2LX!vr5rKE!jTkhdEGQAc>p%rfbu4EFs; zT`-&>a=W864C5359*YLGx$&w!vv##oE|R(>q2HzV$E3F|?u3-dPWNNp}bw2*q{&Dm+a{hq;j{up|Vf$-N|(!c_6PY?p4^!jQv*4b$`~U`)g5`V^fMO!ECWP{hTNc({Z_s3v=F$v1-?= zjT4vOZ=m%>yaOlA$HX$6%nCC(3Y|^m{@O+!WTMU^7HR{oowXOOUi%zo!xi_6fBzEh;>*Rg;UBdzW6%A8OWyEFoY3AKAQ3gLd@Jl{=7;fiWq%l? z4>`q2FQhl$It7!>lF3c(aE@`+-$VxG z`2K!zwLcZ!WWRdMS@Rt9ihZRjEFBDwn$PsM?q=%{iH!8?<)mT*XqbF=)V%t&&i>0b z9wEOI73gR-%$i^|U-OO&1Zk^VpHvuLSI9-b0EhCR>YUG}IbG|9r&{I;Y>b@O3&YkDx_QEqtBzJ)rSug8FiI>*1u|DL~~i7U8o#Y`1>> zl;dQ&7V}X;ubcJo_JJQwwz?nror|<}s8pZWc&t~w4w;h(%C4ys=eCLMb!QR2fkz;Z?ZVa! 
zj-U1GQjJg=l}~bT$b9!O!P=5Of4u=Q4v167(Y0Cj0rO_@2Ie-c{`KUz?(fOBCa=WN zuba!nueqxfN~cyz-t&ueIvr!#ero>c=F`w!Y`5@zzf|)vyu|0?Ks*5(-EKwku!yDp zmvf#|^Hgx9^%OHaCS5wP*G6hlN*RT|#gnBlgF$v*_395ai{2IHjvT^O8?-28t{6%# zrd3SUf4}$@>inwk_f^SSdq4BVOkeNf@2NCS(Y#eZp)#6G$0b&3>(pX|?27$}7(7;@ zmO(e-&#XnP#n|^~-tWS9#R9#fh+2e=<5TKAw?n<4jnA*G-G78xlD-Jph&C8CWloPq ze$nJtVr#45YGPF_RG#Fp0Nh49-S7)yY}Tfx*7rfm(X;P^W^QV#K2CWy=;ciN)#9d< z_{C|dk8owPrq*~+pZlD3bzk%chw3vC?vCtS|B)3GxqNIlami-I zLSB^P8pD6SGvto27i~Gh4TRfF*{g0#uV2{FB0tr5i@Ms*uKc<)sU+0c z?U45_dTpXU)v;I?TomgQ?YGossLZp8Ypt0_quQLi;l6yFR7`t$^dl3^C#Qk8Z1-&z zuk{~TZky#k2F4Pu2o;|WV^n3{JHOvblY@x)#(@-}dn@6?>Wg+qsI@|?or|AT_$0@t zE$O2KMMNfBScLD5@jI`2!1e%|A#qzyhJfUX<7?xgKAJJm@vj8L&6~```D~V2YUgN@ zwAKNzFj1yePUmWMNE!l^Fke;lak{L`jF2a7wo3mnDIAr)q0r9j7Ka0i#QPV!*Gc;| zqJnr{*sA}ZESXqBGc4GXmQA#U`uD0O%KAxOWT9Qz0F>rM$|_cQ@qFdi&+2cDCPx$# zQaGkhxJTb=@qxuPT%E&kN6G&=FurynS{PycDz+ zM@LY_9sjikZyyv$mH&+StyT#KHZ@M@Jc6Li^J}V_ zOP{Hs-}?)Loa*mfny_)YJJC|{?m8;JEK9IVvE)C*bsqC8DH>)-_#1Q_G#^)@Cx34B z&90$7da!<=c{IHdx?7JX_FEJ;FIXo=ot&_R8evywBa_0Ue+E6*A~5#Uq?V^_%dk3m zPg!Loh_EppHMa}@DcGX=O}~a_n>)&g=goMI|3qD$^WIufm%lZ_yVD5~RgHJSUkQ~W z!cmv(@hS^Y;pKlAc5xP;9cIz#uKy5L%=7(TYgRp0l*3EoX;#x_ek_Qr9;}^HOYJJt z1NICm>z2{k1dqYK;L4-pN~6hwvy3%+J?m8+1;JEb>S#7~H3`gA#BqFkg4^4T*l<5Q zuuG6mu5E@GNp@&RmwqG4g2vmu-5|}k_{6G)(I@NgMk-!PS#_f%+*&P<%l*xMX;iA4 zvv^#idUkxGR#JXt41~@l#2scs=o?&SB|0}{b~xJwUL_TOTug7bL~t&_=C8O-h|>d)rWp&^wjQsPLVn{r&3a6gYfjE>+E1e#J}FsNf3 zzYyQ~vsV!J^)A4j9cL^Gm(iTzG5@i;<8IhTv>Z;{s@GfS70v1>H4So^+-)47G(6uW z1pVyZ)QjmWp(^2MP9Um20PLFsDd-J)6gWTyN?npx4!O@I1`sj@W%Ak=5prAIJm5v) zsAE_)W(nLrIWp==b(g*>))@Ri984@?yWE-rT(^$g5ohtg9#9YDsM#Ax(Ts^07?Kk= z^?<)}NueA#?V{U1nS3`uv{zhxO<;F1Hf-*cLMJyWBPu($8m~|UM-+(C;h81tS?ZbB z{@}i<0P+i2iSNuwv-o`^c+#$`bW6MA-!o}F=N)~F;psShWWUaeOD>KH8_{TayU;wl zD5%{2WLvr?>He*$k^&qgB&g3V-=pPx{z<&z;F6~n8MKS4J?ETFywAOwN0U}<`Y!3E z+g;kK&yeXgSA|lF;OzDO>K*-v#r@=!!M-%ZlJrmHtz1TYzhc9VdwB-=JdES=*(C4G zs?M(rZ$I_j!5=?etxnKZkDd{FuHT=0W(q66F!A;)O1e+txmSDE!cDrIQn&hXvHWXN 
z(l{+k-bt!Ib9n&BXB(R4EoA3e^JiO<&PnxDOS0imeGXo`Xgnduk)y%6>mi?LK@Ar|;0p<}trqrOONgJZG0 z;Y1qck8*kpKW*!llTgWdyAb(g$Tagot3Mdap?GqGx!`gS3>KGj(rJmjzQqn-Zvn5% zl8OtUd6R>r{!HW2Appya>>3o468A~R0Go%F{b_fvzWeg_)jG^Dh%X>lZWX@u6$4bK zt#=1S3lm~EQlv-IKYNvN`qCUdI@)hGqEpS05!6pX7+bT&;T`E@!8Y6<#Xi3E?3=s8 z>TB(oi@#7TA|u!~o-=-0N{541Q}XH@gk{xG8O7K6K^$iN^-zPl{o<+h9HBDf2I@$^ zUAifN!+q98CHK#~n&G`;@a{<>w-;AoV&ibanXG!fZHb@lr+a!^zZ3s+os@;SVs9et z)ZK`Hr5iiiaoH6zq9DX{5pS8d$1&nJI+O3CsQ80yrY)b06wUb5BO&9p_SWRxGNO%_ zgb97#Y>E`+P~6P<=azT>W<`?Yi&a{f=D`HQo`6{_!p26_`iU344bO|)m)o3(l@t$k zo3uLTlT?lh>A{VLR@>V&*EyX}MYf6^xvcL!o_==|wMaP(^0!~9$rJFTY{|G3*@QYN z2>CD7B#Dp+=w=58y!AKI^*p9>Iw&^by8gmmhir2?AJCLc^Ehfw_+)q0KfP`6dAw|# z`}5=#X|gLuWb&v<#u8>_(}!t6npE2(*Lpso$XQQgUZoZu|K?4(+ZgQabUM0txs`<7 zXl}m;pV|THkGmuNAs1YgGe-P1vR=*kO8lH7&gDa~`*vsty~x?DulJ-+2(=8G*RH1v z22QYiLMVGuoYO33_HJR#Trn4vd=u*ME-N~vTm4)p)lbX0i7a|@)kUh@VKCwRSNsD6 zxP27R*lk-IE+>g#_MRAUO&5OlU2wIovMtp46)0u4eqRg0rKcK~Hhml(-)}Ss{{E_g z`{oFfV&u1nVzyo~qpH!;6};cwldM+{p@4xA& z-HzviNv!wPF&?JW`c|8#I1x-u@DfJSSSA8gSiilDIjN-Pq*Hwo;u!s_q)VdRs5sCO zzBj!V*+R4Upf+_qKBj_pPVoH#D0(yGcf8OTt&qw@KQ0Zcz|6qzu+bMOVpp=3_{P=# z0qDYfD;?g^!62x%_K`u6g82Au`YFZbr-fHls(u{DP?;foCN z&<=Rh0ZxGBiW!#cWXNTmaebk7;2fCLg=Xg5LI}^Ufet9_mv?JQ1r69^p7+8RVVcVfcR!Zl*~b^XqhI&$UdBatPU*%P zBg2||^2_{iboWy!XhAo#bj8k+ckHcn=1wJ^*53ZlM>>n8`^!z>mCaJ-&z2iIbO;%`(;6862v^43o0j)`}Jn=+cQJE_Jme$r2foF0?aQZ zc+C_Rek4TdP^I!Zxck*b&c<9v&K zQq9Zq+OS)TQzlX~on*88Tx#PF*q$A4BnQUvt@ZLfJ6@AM_O^;m_vfhp`)v7w1J}NHn@cSS%EO9MXWHgS5FYz^tfnCuqkDM>3h$k!6 zdNA+Z^Fr z@-qO)eFF}QYiF^_fY}_zoezNIewy8X4&VGJ5^IIjbq;cwKim~^ldFoCgq{zh@-C50 zRZ!$DESE8>n1ER78U{ou6Ul~xC@Kuv7hHIm#vX~&SPfOK4!xJIgl@NzU|kKk^-%KH zwnx&GU1!9`sT&R5&Wv)_J@7WX@)*?LDF?S62y~IEvn^SZ`97#g!e<~rZ4s8BL}83b ziL(zlyv+L;c5Tac!S+&0To2{qbb!d$Ux<$wefD$r<)USq$}Ha=jD*uYl%UaQ5|6D; z@$wZtsa4LB98TYxT$1~pO#9(g4H1|pR&wzBc!x7Vl<_z*$iE-;!>kV&vJM8V^HtUO z`|Ksq{-U6Ii~s=jsEq%xddS4dQ;+V`J_1Yj{_J7e*vFmKI40`*ESz;A2VsP|zQ}2O z$+VN(+2f(x585z=OhnuFmw8N{_0g}YH=Fto_rdYoR3|QlvK?pDyunjB5zoHbk9EeY 
zwvee@CO4h0swv#`%~^(*i(wLj6=pRbHeCB!i#e_0y$bhI=P_@!cE@%vtoEA(#8!o6 zXT>lDE&lSu%9m#(NB2IN8E|v@ql0cK{nPw=aSjp4k+Lc;q;$uhWC}2|O2qc_>zK>s znC#ce%04s`_AL)ShpyJ@vY&rD9ut3@d19{>Ok$xwQdVM1h@-yhQxW&Qdh!cYis$!> zv-0n36s=CGl)*JRU1U{J81$L-mERr~BXn(v$J~{e7Il_r^IjHQWf*bKE;I5@9XQG> zh>+1p>Z`%hAR|*Hx^LYY$C8Q~RyaoP+~z<14svphK3lFpcpShcyEqqHvlCTb$BN6 z3g*ki5rVu|T-d;?HNHYkYY;bqu@Qy7$)) z2^Jv9e!5JHaq;k7y{NvxIKIcp*NYGP1NgWu_yh9+!&!pOwz@-|rbBd~#Agb}`>|h* zFX9wNKXMm@q9oyYf`%joXPbkIO=fA1H>dA(+T57Me_~`FJn{63#rMgiBhFvC*S$bK z@Bj03Eeb%1IG6J0+r@*Vs&_j0rm}P#2T;dWpuT|2Vv{ZBxA;9;xo1FeioH+T=UkXG z7`0j2KhE(F@S6-ghjs*MV_7@!lvSvqXf5;=YI)GzaLxzQ+11xP_A5$c+}ZrjVXrIR zkS|(KevXuX6RZsCdhJTriDkX5H=h7>kS&44hEom|!sSF_y7WrN&OJIg-DHpXw%Q(0Jlw&tohD}Yk zUiG`Lw8X|R3>D5Do5_+R0!+H-5Y zGOB)`?MXxD1jVx`4j-fs3XC#*qikMwBU<*{&4LXrA5IZwF_EZk!x#Gt8j@44CDVEA z{UD#xDrg~#xb+xAAj(Zex#EuB!aOB`P*d7hn%8NNgFHeS`#=L(3lL>E=Z`wv{CR)c zi;lg*c_X&g`e#X>Tov|8_?fXDI7uTR_j(|76>==})z_(H31}87-~x;{&j!I0K)XDv z{SxE^fG-&isW})Q6T8cv{io|qi*4A*4rm}gQ9mxCNOstM+Jj=QJ;AcFND<NC#||#5l`eFSdWPk;tq|E$+@hiBOo|e0(D$^a1Fvv zm`8K38)zENIcNGN6J)VVr#45gEu%^Ee>cGhbu`=_&&jcxt&j&DCAkN~+(idyBtCLk zes@JU=EST?)qv5YpBN~w?oc50ISu5ONgfmqLiee$Q-e7cuD}dLm?c2$LO*0PLOick zDAGEN;+xG-Lo@936JF1t5mAv4nbijTHqIP`H{~UVn(Bt4`A^C8ufB8Fw~Sj)m%OVI zJ#$pclL*}dAPDWl049`gSXm3=H)>>K7}O@%eRWiiF(Y9cd@(U)Il0qezoW)EI>Qd1 z5YLK*vGRB~EVTSwT=)gEEla1!pt}><2?t~2{97WmPm(ige~2-MWwrB#|3eDFK!%)k z;JB->nV|ijo)DCMmM+`}TdNxijd73W(ERmUjWI08CSqSqPL95q5_sLf=g*Pw!N)xS z3yrvVBohBQZ{_Tk$ff_t!#EOyGsWdi9~tt~F7gXY1l0bJDFoFY^aYhzS|e)fPR-Hz z2C^-F#DDgpIgaw&TuW^@?>n(M08-_uOnI>Lx*ZBY3&FoH*!YR>jqjWmnk=$@O5Od+ zD1H3;R@0PW z5!0>nkh3sH^~jsMqrn1mr*8|_uVP7*zk5*Gjy0*&1?k$vu1FmaW0!*qBa1&Whn(G3 z4UvOb7O&gZIS$XXbMnTY^Tq`+L0}~1A;6E4)<}vIR2$t?y81^xKQc-v$qC+*ksJvBp0Y?BE`L zGiB__QV@DRooz!GGIV^@wiim9lf+#N$jTrNL^ggUmqv56KiGahy0s8{s29M~ae6D=@(Id`mtl{#j7Eeiw}Z&!BV{e3nBb zq6Q967e?Y{t9C;r191=&4UeFBMX=&Kn_1&mTrZ(aqktcT%~OH_Y~o~E(lZW9O|f6l zLujq{LN@0?>GYZel?TRC7*Ry?1Wm{S{M}{GC)sG?nQ8)~YCy?WMSMf^9sfUKU*zf5 
z1D;(EGgg=UOm(!87|-Nor7G+OthwH$5}Dq4oBkb+p%OPJXUvdDP&?WScLLX^CU2?M zM7ElPubZaYg4Bc<1foz0);x{wHQBI?&>M;ww4$8I*1E36*h)k#+||1n~M?FT@SBIQpD`u5S|09+<>f&GL9cUBSbF%i*0u$%!l z%Eezc-uslP72t_X;AN?hgSHUPQ^1h_=^1G z4jsb=|J6`Sqrm|0L@Yk5VwUd>OaNlA^&(un){hR4@k4MNs}7VBJ)M9?vy2*B8}fz*StlVwEb$Zn)CDfyEV5d|Yso)PkILTflsD{SL*tmTy$gJT8hGK3GzGvVi%}dn*t|+xfCAN_J>mnFz`njZl=>UnRt7y)z1q^Nzx6)V}K? zg#TSvPahHPffumEc#N)xvSoTkjyx8ep-s2Tx(4$yr6jWMNyG+vzY&C~MUQQmAVJgm^C_IT5;K+uz`}88Dj-yt;dgRneDJ$Y`kC_m*6GKq6A!u9 zDt8FR8yonmsanvspCAyq>nH`jN>;(X>hpv-Z!2@d;ZL+g;;836_U}h8pYzubp`V*} zV^I3*bx5RPv10MS)Z?E2A%Bn!HdnY#xCISo1}L*rv>&fe{P!k*DqebTcn+Jkw5udN znP(Kssyw>H?1=En(C`v1r<^A4RK#s*7x__o#Rf z!SBJsRr*)QHXlTnu;|Ww0FwS;Lcs9Dk$@bEw{kL!r;`Tju%ES^rycH#`ymvLFA|Fa znjO-v;M$Q=9pk`PImc3(x41nr>^A`h4hS$eWlfj6;`8^(E54tsghv`P^w?5CPf*jI z5p+FI&Rd$3d!6&eN@V+6&LD>KO}L+6cEfl)r>Y7o6xZANcc%NlLbxS)7l49#f30yIv(wM}t@szG~4MwN+(c--AIhGBEb=Rbrs|S;mai)nb-9LZr#VA^+p;{P3v{S51~rO6!sslNdEoMS+U^9?AwWY%xE}0~ z@f_ssK~se4>+VQDJ_N_Uwh0KdA%o_5xKid%c<=s_!!%-_<2#EXEwXXa7empwkfM@s zWdWO#{q8bVwW^63!eh=14UE0SW=g*HLe3OFQ_wKZ3VL-Lo=RB{fG>r&=-|}!h%(Y#X!pAD?x~vii$v> zC{STQlEQ&~0mtx$%%%*AC%BIGS1AG=(pyaA3Y)mrc?$$QFI}~hAmY3|?26^9j#GX> zsG>rLDh9_i=(81Us9~okIj;OqNhU23L9eBRTRfwhj0P+^!H2HXVxC3;!K#wL^F=oS{7 zcnNH+1}Z_$jV;}zxJHc=e%!r8iDy)u&wnxoG=RZi(-?AU6EiwFu)W+bxmIF>eSuE2 z-1*{jQGuX;Q9&Cz$UooRPo=n}+qJzdiCCxL(w}{f35PEA2{kM+SIxJ#zs$uaW|-^p zAy8xW+mz zg;A6GaZ4xKUsRJ(2hSdmHcifWdQD$#f7C8U$SI>7&yo`HcSiEPKAWpkU^8ugv)nba zEQ-*x(M%dQ@s))naU0wI>hnwKO3~*@&lnTNX$j|9ReKC*pXmEdFBj2yf3(Vz4qxnw z5;JeAP^dsmhXl`p4e_ub&1>7Et(6}m1H+u}HrsM0i@xM!^o}Gm5()Ev!7uFKN5zJ7 z4W_;LZ=>;X7bSUV)oO3udV2qY>hY^3I`RD1@TadnT4=vhjpaKhhIvXAN+$uN0V56K zB@{T77u$UuhI^04o7(}c>N)1M2lG&4X zPq4bN;5z@>oz11v7}$cHDje2S2lOG(sssxvu66AfXU}ox^9d=BI#}EH0amA*!^2~qn~NZ z4bOf{zS0p=(KnFC+D0jZD)_-oiEQjlSY;y}(1RMGP63)ieWrmeQAuWcJ4u!H({?%t zr-~ZOk9mph z4r_jf1+YW&3h&v$)|Mhs6UIx_h{J1}lj#R6723Ix@H_QU1x5ZAi^qYLZ3t9;8%Q+G zo14uA6lRLKp(HrgP^(a-nNcr)e^^LO43i2SgG3w=m&lDQILHTH`%8ZH-bk&;pJ8~c*Tb;@Tg 
z7_+zpuFubW`n|fw7P{zes$+~d823kcK8A^riaeZ3k~U9! z92?nIUXzw3Y&1ucAxpqvvyAn7ksN-J!-88cdNFS1h+5$fi7|!qwbt>@ZgnZ3AJi*v z`Cg3jib!cD49fohFm8%mJ1a@%N}XN!2IHHyWQWrh9i;*Bjj{`4#@4GqK{Fjb3NqbK z!hRpe9?mnx_im%cviO)Y+ba{#WUmZH9~1AT);p^n#A#7ACiVB^Io5qLwyoyP}!R#A)Ur^_w=2J7{HVez~4$ z52^RVh70$-)ESnTZ*T8}7MgkFi8~=|hwGGCyh0)QI~vF9IN2XprNOHnvWhB@^Hb02 zQ4`wWUpw?A_|u^AddBHtc5hg;^SHSW0J`Y4BcW*sD(LonRBp-vS%-w!uj9rTd-Ouu zzv|}0m>{)vx8A@>L#nvH68M%#^po_V&lj(Cj`ro2&X;P$n+4??DZcH9kT|X)Ksf6hz*P6JIF4G~66inWVm%x9`(%aokonFl6rcW;`AwuAc0L);d7eyhEA~>O zb(%apj&E>XmKtgIPO5@2mRZFIGVRDwwggd-B*a>mEH{V=%bhY`yz_3VQ7rJYbl%6R z!5LIvZgji%c`F5NR^SG0pNLgC>1 zoGUhG=@%FqfKNfLqI}(5wqQus^Tybi0@D6scJ7FULyROO{{y1hX!+avB0kiE5^SlO zyyrp2sZ4Lb9sp0ty%iuT$@^V$@mnD8*kmAv+$QV*DsyH&87H%u3<)|(?~Bj!yFFM{ z3~8|e*bIzrQgw`@;9qn$k*#FDZ@!GfpJ8PlQ|t{(lBtwcm9VqKsG`>p50!0nXz>oi z{QUPMbqg^g62zK9nZF`-za44uMFcsdAos)|k{t&6nBiXT;x7&M&J-fvc|RLBEg2{? zl}Mh!T@o{r5LGik71-Yf%`}P*s_0-+w6FAy3-#bq7=4}vprb&%t93n?bwaZB0_C#@ zcu$l=Y)nlK)Pa)nrzj^oL4kIJAV*B=ZLc>mL~bxh$j2VnzkZsOAx2-<&;|2T>g5J~ zisWY9sS#jR!y~olvYG!;5#~yzc_Q|#S3tSKUpkXaJn&^`6C_g*@N`c#5TyB+iv-v; z8$$144xcT-s4ayt+Lb8jh8jf%?cpc4ze8V2+Q@FCC|nn4DN;KbXsrjJm59|ynZgCg z!8*Ah$EXTN=jOHqmIU{gH2iazD-q(cvoE@14+_IHb8@ z`7hc2Ggs&u;B=L}@Vi#MKn4@1Xy#cy>Ck}Hj~wrzDJ`f#gk$%Tpz`iv8ZZ4upPDle zFvQBXy(851H_vb1Vn8;IMLsVebPx?xs|p8R^vG`ZP;V;%tiwNWJ_-cRXIA^LYF)y@ zlShYmC8m|vgO&jj{ioVv85hk2RdW73WV-mAkmqxLcyffwLk!&>q!bN4 znx;q}nO|`(IeR{(TD%>S193?ZW>Yj||D_M%RHfnvFeE9w#0|~Oo}45z)=N9^O2C=j zgi!Ph$Il;Po244POMfe7E9V=ry0Nc+Ej@K)U$3aZ8k(u1BLL=U5wE(KwTPKBJVV6W z+)8x3zLtKVio{hqYcO0$IdlZ!f}fFs62@?r09T^(5jT}3T+jG9!32{VxCI@ zY4fBPOdgiF0DW>WbLe*Zt%+)1-6e*yt@hqj*F z;Vpo62>t`@WU~rt+kEE6#=A!wmaEkzEjZ;o`Ca_H8J33p%;zkW2Sm5~;C;7GJm@T% z3Fcy}IHhDOdRo7Y>@A|tj#TKPRzt<1o;z5YhG9kG=97g@=jt)H`s~;73(#G}axWbe z)!i!dd-i2sc7Kw_ibN=T{aGA4{M*Hx7ux5moT`*vE2?HtisdH3aE6N008n+g{6wlU zhH1wQCH{=DPNZHDjZ(yMRVWG@`c4=+YRsUiJDd%u(n-qvAzkooc138PZ_2d6Kh9co z@z%*kAd2Ku$H;V@$I#*xU*9;{P7`KQahbI;LD*O0?ghsszPz=t(R}4h9`cdrYB4Y` zo->G3h^eERieriqz+j?l&;--Wx_4;qn@m 
zrM+UtlPsUiL30^y5%WQ65fUCKouwViD}&~3M4Z3D+YHZtIlTvKshm8BNz^s32mcLA zAx;7n!!r=k1kl{e_oL9vFn`^lT+y_=f^Tx*M#3tnQc9l=dh8Sshqdx%xK*nxEf)i= zrkq@jG+(Hy5SdtOg zX<#o&VG%Gd?|T*CG?Iyhy|@AqLjt@}Gq2UYA)C6Z=fr_17lsJzr})pOO3GORjbHh) z&m2fejxo^k$q=>P;0L6mopVRL;azq~GSg<&Z#8Wl!_6Q=6ZceueH1yB=bNV*7$^~E!9p!y(21A|l_&&}6~mWUNGQKTePgn& zUtUGYYS>{@^%M5&RWiaib&SS}AqNlp&ByBMqujAD*H99`>pLKQmy518F(7QsZCB!M z%v=@*NS=!BqW}@ZJ!XaVwGHKp&#E0@(5`x^G8%B@1NzU>0V1Bc$m}3Ny@0{vTe<@s zUn;1c;+b%g1PbH@ee`kSvoqv7>p~nPrOIUW{+m8_fbC3l_%%=<>y!!|Ts>q-=3(`{ z;eIp9xK-Y1E9IG`#xi;PH+xKs3PNqBQrqT%gDxLr$zLuP@<+$rR}*S$erGnE$cv!Q zmm()>GjG;Hzd}(rYO?skH|F7UC!CjAh(o?z{^87twN46&%`3nnBR?gby!QEx>dUi` zb`!@_`%a_MS+exBz)f{c}Lm}?|q*dp8+g{hivTq@9Wrgap)pf zbSc&b%GvkPeFyNzMV{rBo1e6He$-AZ+qoI~v!(yeQ5(N#P66n8o-yvpZ(Z5Zkoz-} zHb0&ug_%mRnN}W+dfyF!7Zs1*!F0Ynan_tU$O;lAJnwaB10%ebNB?o4Dwp6t6ihrR z-MCCcf02QXB;I=NI+|bQXhl@^ow{rln@a}Z%6S{N8_|(+>$eo~r!NaUI|8T>>v9oq zS~lL9pe7ZYzPo@4^N(#0-drWkQyu)O`aUijtG`6;%VKHZnYwf?dZkB)+>j~N$KB|s zHWJds6kd_mSJ!E5YQ9;~4MAZWe!c6&n2o5lQaCTC{-EBu><>CssESShkAJ7< z_D_7vL*UAW@7TYQd0PihLvlzQNvDoM^bx`%#+R=F9Oe*IB3EqmERB>nDcZ^<`**$Q zncig_5h_AbB681KMC3HewX1)tWA|`Z>3uOyL(1+OWs->>(N?d{sm$wBoH})FJiia$m z0@hi%M96G7S)Qj5i|YXsZf4Xy9x+#En|1Fv-fVKkp}}+|T<=>SX66tmNXOZ~I*a>YLq%y2 zWFj7TKeKcgw7klmh?H|ML7waL`XMh++X%SaJDiA`e1*j}) zuq7WqrfFt$spyMdazfrdZYSvOg$`TrU|vjz7(_f9Z}sfmxRnU`BcKkXO3%1A$pM8A z$}MBuP)=tH6uX;(nQc`we-+$XRIXKy_s*}Z`{3hHI0wzjp%-EpU4(1gO@LT6I<-Z` zmzmtwL9MCE;h-A?j|s3yCwl~XN3E1yh+Dp;e4B%Q8=vS%0A zbB9VumRO^VP)F7Ex6>~T{>#k zQHrY#I1qxS6iH7ftABn#2M?t2=U6)d!IBtV>4L<~Jg^?f2i_HD^6xnSJw*7Lo;wO9Kb0`w)O7hy2Bo0W9Q4`q;8A_^J}eE|dt>fA+i= z?yKPLwNH}Y{aFbt8Nn4u0mWi@0ay%9x4)!6o=saDkZP?r%Yw)k;Yr~q2vZiINxZiD zd*y!GpL_H*0mm>hSc@t(A}I3xn1*2R8m%TAy8*^CQv2ofb9@G|_gG^;c3cgodfa`a zoz?_A)he$LRodq5pQ#mWv} z+)S}_oV+i!Yj7Sx<1E%`|9ClhSp6pXoQj_s`RVJQXRcOz>Zvvlnu=Ddhi_Gb(Tp)E zA)VVli_J*uf|LoRIZLwneevZ684@|0*T-rJ#f;_iUw%NZquA9xVE%^Wwp}l~lYZn5 zJ|0&7d@pE>jb-Ih)@DjaC{f#Qi716Ly_^gxu$NnlZ<~YO#~g~wTwzYpxa`oO;>H2q 
zIvB!vE5@=*h+%B-g<>{eb>O~Zw%GN#+I2px-$c%=@LRly^%0D9f9aJ$it5A1l#V3A5_|ZO; zals0AZfy9WRracECqw_!#Rr5)KTo(`1;GqSN`iBTCE7$LjXrfJA`e%Y8{`(92TUJ5 zPBzY-gjPLF_p>U!UhseRYJCi#HAPq7gsBd*`YMJ!?|a($2U72cM z{SJ7DD@bxv^WaZ*vJxyCSgoU`0Ut@7kYWO3eMp0W$o0~J7oepv2@1#>=Qq8QSLTxh?(3BQd7FDVgp* z#%LUg@Q#5VNJ9Du6kSoW`i?s*V=ZC4cw0!t=#EA!uiV8Y@6&D0lu{!G@w>6k7cR-_ zJf02D!v*V23hkEd^h=YPQsY0b;yzGSag)I)H_a-i|7Cb0XE5u=^l-6y=>?nioP*GO z`x;u?x6=5`ddmIdR_jFmy_i>0raEt6h)m!!GMlF{c9P;A-5vQw8hWW+fSm-#vsN+N z%qmB!QU4sUdcE(@yClWFaklw+fQO!o!$0xtG`1btyUShs28SD8cF4Qs238YOt_4Wl zz&~WdyC~^v@0of}av%O3p``%JCt7~y+=7VV*p~8>&nCNvuMMcW@lpIKrUS9Mq*eVVanLd8_|6h~Mf8DxVsDtIkb6aeG?@(R)DWeZBV` zx|4Oc6CudRDxSpw5}0Xw9~lS6^tfIuqWh*!q7>Cm!KRq&gz&@)mmz3*?{lmBR{Ss51xK^x2PJ{6I1alZHCHRs0 zf}Qok`RaTx3^9FLiu#izW*8~@1Tiiy##}fskHnghsKD=li!<5}MyN*}284Q{w|{nM zvdyR;P91Kqo1SoH`0CeDV@cDz-Hc)jEn_!0bR1w>bBnoZeaAocqPAQ$AwJD=P*-7l z`KyrGq)^NQHid^fd7a*?Wzqy4aLt#V%rV}UjA=p%Tb$5G>q8k~(gPIA_Sg>vr&7)OQW)tCMD33Kpu8r+3Y zWeKm-WYbssI_9+{Q3%t@R;slD9jky-Z@E5zR$e?VI~@^NVK(?oLulfyp%aQc|EoTh z?9@8#9s8hHiBo8H;v98YjW~L2TX>xTBpqcOlC*Q0S+A1QXuI3 zxR&`tbzz_APqR~tSY^A!fS35(pirmZ`U4zte>EUD_j!gq0afTD`iaV-{voHB@9VW%pRJ#tO-mJ4UT zqd?xCIwZ*7f{@$h(1tzx0Oa6vM+M|!wsjj; z={3%kDWNb+G38-o{zdY-ej|=6@TgCudXn-+A^x*le;QZ+cp8@wbo(_8ecOo?{8}^P zpPV!FVd8+`O_G?C&T%jEGSeXQ(SQ=Sl=q^@);#qkOUSU~G?4!fX|``Ns83V6a|nKW zaNkK`k<`GKn|pKw2GR6IEL?>^^k>d66^bU64>AMq~ zi~Y)x=ns<~1@-#)vn2%^a@A*T@&qt~MXVJC!Q8Sdi;90gyUj)fjx3D|gGMVC4FR7x zNGCf4A=qy*kMI4KKl0d9!m{wQ$v`UFKS?q{5lFPKLf*l=NO5E# zI_m7UIS+c=ROU+2foKJBD{86 z@Fye%2|%@110h|X+<_SL9a^BsR696LPq&1U;Rn8rNwGMJ&8Gl4{0s>M5yn@kuK*hR zh9;b%JruLTVn!Iv?w;MxWvU(MVdMZ(ZdX&bq7tL)cgx5G7*$LbDfQiFXK^9DpZpp& zdrQqCm=F2Y&vAk=Yix=C(CIw_tHsa@_3Nv*eF=tNhW{8eZ95{ zJ;1rmOgNGxrDzueai8L1XrnyPHBe<+Bh;Ki^;`mS1^m7@yc)&7v>QD21$*#wQ6zqh z7PW2*fehA>B^f%vN6SZVM{vSCj^THw1kUDn$U|i&5G(q4k3xB30#FP$h@;Q$PY)p{yCL zfu9|%(7VV$3j<9rX|lxPBRx1$bxa4TdB9`_xx|7*;jlE<$2Y=iLGR^@A8P-w8_M5v z{6pdjZ~&4*tqwH3ds;M^^$b>AM;_z1r5f6wZ-Iix^I|3Y6#WWwr@E8)@?*hi)cJ$Y?b65)Kw(k 
zbib>P;U}E?@#XrX2Iki~)097Cj5Q&y3O3WBgh4#SM|Ta5*gnrKLI?cp^2A7#HvL4T zh`4E&SZ@O-73J_q;8MP=1-4pMD|_Eg{H*lp<>wTFB}2Rs$If^ri~4PFW>OBPesprh z7k2)BYX6QJA7pXE9WS*(?IC>PyT&T*Axy>1E#S=kFrBUGLpVV?8v@!e2iV}h{T?!A zQf1eS2n4AWJk6MgAlASD-N2aFj`f>pFZ1;70r`zL51afMpOncHsE1mR-kuCM=h|&s zJ+trmjH})>hIl+LC&_?b{cAcD;28W{kCTS9CDo)K?Nyi!eGDP8k-hRUnQBRl$G6-r z3^8*(dPi^37MSNMwVlSHX0al0^7^i=otGhaSx z!^*1m!UqjB8~)bzjBDxo-TTvV<*)tF8U`YG@}JWru6Tj;;o;mG5C#oQ6}OemSWqPp zn}of^W{j##R1g1MCQ~nD|3e7vd$0q`TRnh~JNi(7u!PcVy<162u=TYQ8s_`#&2+CJ zXHC6a6&f{oTqk}CSIz4VNAMht8tt$4F0uF-@+unYue%-_D>hLUWIpWt*RZ)Gm5i z5q?#6Aa>4}1$=;9b779tWAw+ZKM#EDqMgM%zgvA( z9GvwA^X#>dR{W-lw1JoILeNG0YvMuzJnV=3k`N5!1FOD#TRnWARx)&-4w+{vPS7Ur z+Nmq`iHBTPiM6hse0uZ+^3$?G#*bu8cRU|#qt?hcmgLin0Zc&Q_E561no`8;oCStW zDpw1|Lb9oSV}a!Sf_e0n56TzyThcA{fYD9b)IXYH{t_@PBpi0uebj&i5sELr>pUkv z{F4{kKUeK|_nNkGeD-8CS#7HD(}u5EK6$E;XJn+XGtZCRrLJ?G9)EO_i5AJN!#isE zWG=*jYCN~mt9-8^%^k`MXQd)MV|V;9wKq*G(z-7F@0*=DoRk(n+?n}l_#AUL zT|Y6hE1-!Zx34PH7X)X6`v&j#4kD2%-|VfKptXtMxRT%ZO&en)n_ErjwP{8h5t>75 z^@Ia1llZj<;u9ZN>sG`>kLGKnJa+A`Vy5}WHa@Bvl9Ic4aP)mN?3+tUa89 zaU#0R+sgD&FtaoL=rcrB04EJceDYEd z&y1P-oW9*RhZGct-QG^({vY0E2kIS%tZnkJ{#WuCY^#?`Hf+LgmUkB_Z4O_k#{A7d zWPp)Rr3r4+Y?fTJi5MREe*s1k!x)Ah7DF?dScUu44!;7`pD76eaZqzLH z;5$P^Vx~vfA~)`cX@Jx$F4OWv6-RStx{OXSZRe717s#l1>*>v&p*}Je^pf0~EFuS4V$jr&-3~Yq zjpOqC{T-LAdnIS$&MO@;AMCByf>n*+FD_^KqVhFNeEJ8vypy8Tu7QRJu|4$cEp=jm z8c~InxQ8S9Kw~K;yFvG@(gFPK!%#;<+XxT3!1uc=FczirL%uPufZ|~dJ-^ef(O@rG zOpvOQW7?mTCuc@s{i5Oepl+8n;V_nqP){1;y+zy=NMEuo@zjc4Ga)|9A@nv3ba}Lq zO8HKKAlTZ0@i?z_Ck1hU%I8$IFX`#0yoMNaxDEZ3XQo>~VT`(lTKc|Jx;qS0t3t=e zJNS$&v7u~3Rbr}GpZ-!y^Ci(wm2cpQZgoOX=gBqAnhs!l$=X5FIF&Lf^Mr@bybDgW zsL_q}c4YL8Q|sEE%9g2=Yf69srMGOol`u@g9XM$oSc(fz_I!F|%TgCr4+9rDHJbDH zDMp1VPcF?U_f8kL7XgVa4NT+=(}!1PD>^CjXE`g*?TY80ND3C+W19>r23bm&0`wal zQWovz4XP2P_!#?VLOM*)o);`Qr0m7w0F`01ajuA))1Rz5cRMT|MWXBDf3y7hP;-&0 zcygQg&>ZmjzmJo!(YFd%CFYO`k&U16e10>Qh@C=MY(V>c}BJ^zS@Mah$J;Cl+6zc_f!ldS0xl~54cfBKy;d7I0? 
z!f1E@Q*!C4VVC@xf?i3BbzemRH4n_sP4)iRTOAjx0&LCn-v>8%w%X03g2TFuB7<5h zsta?ao=-N-cA?o6t>7^@Ymc^rDmTv^ZrbeGmzWTym+04xHb#T}Dk@1n@s= z-W(A=l?|Fb){9By^GVWCKlc7LVZJWf{{D?iRd_TIq|@-c5gzuooV=5>n#yS$z@`WK z8Oz}0(txTkop=KjAs*JxcXqjsW=nk`*QrE&_U=7@tkQma7mm7`B%?-tVr^cyQ{~B8 ztOy=~VPW9@`QG75`%4eWCBxAAsU{yGgVDS_qXAgL+6#2)02QKFw9{}%Wr(d-Ib`y* zD$=;*H`Gxpv7>)NgGZ7sZlNHFpqD3Z+W&Y#bh@Pzxdc@v zmhwx3$4R8xm_Da*!qU(eO4*N%UcZxE%4)4;3=={U!{u?r%JZ@E2sm@7(fwN(1M}~; zfNx^{aY0wx;zL>5_O%f;pFbHKb%dKw6|F}p-vWKUdO1p*DR?=8TLLEII>qI?T3?@G z30YeOdcypCmVjTy)vbSyI@qv~K@F4avI-u-!Gq6=sl21==u-2nTaPRf@ei=8lmKrl z--=X^u3f5JgV*Eua~=_Wx-Dan+gFFiNT)xKFL7+*;6oM)8jkIrh}?k%ZbDl~GL80t zrL{d(&9A9(RG7G5xKl$jxGj`fvc?{2Ek@9xqpF|0S~o0mFBtGV%!Oic{2~`({kVV& zS2!5gT5O_I2Ew0*xvj+$UiGc1SV9#aIfXgF27FA0V*oC-dgg%S^zn*r79vo@}Cg8DuLUme8rc} zKwXs&&lCd_cKi5fP0MMGQsupMk?*}Iysl<(w>`CYLvU>1A&o|r73CDdDgw3P&VHd3 zibl$x{~8HLa4qVM+d6cHBOAh*PR6?M`^u^J$?RBcUAbmRwM(uy#;%Cg zecdMe)2(gQh$%u;jUQ}rsBC%bb+gPL2C2}B%_z4uPMCL_zANw%kS_<5n z2C*kdq(}7x#DuQ)W~Qtrk~J&=$^8(N+bvsJbXXI2YR;^Vf<7^fEF z-Rqay61dGX5-vYIwvT_0SdVv+*k0aoLwZZj+GDnlzx113B1oZ+u49|&KsC87a7ib= zP}%C$w+@Ic?WsRtXy%9){;)SwuhVdZ-xoJ}by?%Ncan!{F(Hu;f5>-@uCkwS-{=nR%=JBV7bmHP>BB8uYsEhw`c!qrZ~74tU+KY7`{(Z*$JV46wY zVtxPJe#wrhFt^j?cRR@v$e?TB{v=i&lrP=vOPQi*li3@ zKtsBnPacnHyqRkaLQxdY=?S8xouGsqNoeXJwBlHC^1X4t7lY>x&i+hYvE@U!cP+5v zDx)1IAohqbSYkPFuq#U|9ZYmB$RmMdm`UGTGf3!+&QAf(1Xc5`~vRO>; zWZjOoz+;mG4E?6f*PtwNcJP zc{N%?VKMsc`(J5j^O!%#yE9nmuUQ4+S!UE|$Fg&Aq>}oeNh+5GmD5b`3N6(tRm5pt z<`F#Yd6~!9M>O*tNA5Vo{P{_`fZP7F--{_;Ljwoi!t*Jd=0hGszdbe|&M}Dk_9AyrZ+#GbDF8B-y*CIB*$TRkc(+$Cy6AXPG$YvcH0B@f6 zXIrbHnwab6?*~LX<}`{kemFX8i-``r@v!{9&W7>nBK@6+Zp=^H&IdEu!^QRc{6Z2O z-Ars{)=yo3%ej)?7H&$-eP2{2|NUl_Z}i!#V<94QChmQCE$BTadFt>m-8%jnCC`MDA>sshm5RltKbiS3s`+2wT%HbE43wPCLpj z$Gdsso4$by&UU*9%d&^nU>HTI-y=~Iyr5S+HI-@k7!WvD>9+x8dP!r_8PQxt zZ$$hyMXcF31?h=D5( z9m(WhxRBk7#fd(@LC?yRZU1D;fruQdwYwobk?Ew?m&g@m3CGr2ZFsR;xtY9Yrr+OP zua?4Z{*06KeVMVt{f7RjXRNBU(`{9Ihu0IejR6`LSAmq_Uir&N;^)|nBF1Dz1~)~u 
z2F)j!&Uf@jrXf8(P+@)nFYXh)dQ1_KmY)MYYIP5Iq%v5}ZBLvJ;(qD8GICtOkjJ_1 z(=onGozuN$Qf(BhA{pcBKrv{(L^~S$F1<2fknA?-e$jnqcS+HNrBA7YY*2qjx@K-b zKBYuklHbJ8!KsM~S<6~4#9Zx6Ctfu8Rhvu3R$%5WZScZlIF(?jexCjC9H)$$?S1`L zE#K*QH}jxta%E%WMf&qsVhz*yFS($;_D?93;rQzdLGbm z35Q-fgeD0xuU$X+j$eU)>|3B@o9om}NwepAb1r~+Akv{Xn>;5Zylt7Uc8|CC9E0uY z&Eh9gks4=h+2zE3;vW?AIXG~V#T={a$ER}lfX%V zKnW8MEPJhrZO4bd2eF@A1~8h-l?k_`u^WkZp!j(ERyQYy?D>^%^~^po44W zxo@R{rB}FtlOO3sYtUnh66}cP@JG*NhF#(Db|3)ql~(30&v`}NfJLfSdTZ5}ABv0-vjOFQY&ID%~_qXd*K_zNvA87?EN;)Hm?q4 z_0EJ46|z8xK5Lqa@6#s2nj-8|2krc=!!k{HAlF$PU%lrm+jf8(L(Wr8xFfYAQ>grk zvByWd!aVX;W_H7ic4(d9&ON+hO#1+nE{$zc#RMS=5bQ9!^!_mmT;F)8|3 z^`U8`yf7k!6FI3!E#hvHQq=asTQR&QR?4F&eM@OJ0p*B5r{00y=lSEcuqD_2o_(Er za>WAy;dR0N63MFL4u#9TiMRI9Chm3j9O%A z+^n~)(?6)*CY5N0l>h8g(tTy0sV`!W$v3$&;}x;Q8NK>-cx89w_l@(l&hJ`<3=ZGl zRgR61j`L@|#2cJ~RoI~~hh}M@Z5}JP^r;c5ImQjC!e?xaIZqkt_v#IzEw`3j@}D;a z9~1N{hq?ax64O5w;ytx;y?MTNEp=OL;vDMEAsgJxM9Mb%dy6l&f#RK1M)PoTeIZlY z*sG1)x`k5_VkWNvgWN%uJRi;jzg-=kj4^Sv3F1^b4<0fHWv)a`@gt{6tmbiGQ;+mh4m zo7kFJmKeK+P~^7*J-PN2TD+TxWG6kSCdsrmQ>LN9jugn{N|XMI zjn6{?*Gd6`S3EY;>_X%Txn`Kw1YZI+Fj9b?DbapM@7{eGoW zCAdoue5wOxf!Y95c25aa#co3TUaYH9a+Yh+0^>_g=T$* zj`{L%(93HwIw@SWy9!;)GE`;Y&1OeNg}aFdKQHcNS)gbTkAa~y zfv}k2GZxy)yAKh&CWK%+^(bEJj-4{$nQY*zU^agv1rLt{2Va6F%DBaKwEXtF?~$8M zbzvR{JVB9Os2$l@r)$cRJ49et7^;YoF?3<#Tq(v@pM$19d-pNU236V%8tFQe!!|zT zq8ptbUXq%X-+MpXsYgUa3ZbKQK5KbP^?l@*3`0mbgbuSu;gwT0LHz;IsPW;FpVH_B z`B0%1UfzM$St!^ie$r2ROs>UvE=qFQN8TY4EQz%I2-YfCgyKDZSzW$*yIb{upO`z} z$c>RT;917Suz}xr_nO)0aE9q4xzgP@NYq~U**GL>)se>s%fLJ?sdXxp%g(#&VsQ$& zK@v*nr5x@SiG$vy&}Ssjp+Lgk&>T$Do2&91dMD@*Y$kG=U5n({72Ni{&ysZZ6PCrd zD75z2&?#zYTgnHn$gch}37PGfMH7jRoQ+f#avscGc~iC`o4lBYU4|^`;p6213FqHW z^(~k78{-*nm*S0vV?Qg9OGDEyQEayt#e|T9!2r0-%Ju8H7bL9=D;*G)s2pA#;Xk+Q zk=40bWQY$P-L2T{QCj=jEc&JYeZ=g3UO?=4wugB~w3Gr-j;6@K#ftd^eh$S8aH{V& zIeMlYK_rlsIXVSkoYO*FS8h~BCdXU=k1>u_et-J`BMCpyAhHDA)HBB?6X=S zf&EEO>4<)JGNnF@r!p)*9J70S!=9Razy2V~6}xN=7rh*F;2qyH<$6ag-u5XOz(bWt 
zqOY#VsBk&kU--n^o~gg`r9=Bg%M3{oJ!P}*aH}w?zvwE`J3{p19+vFQfrcBX>F+xu zt!{B=qP>dgCpx5{p52}A>&lhkL`if%=~z-u>HESmfeb0to2Efhb?uq~ZJ(H9AULiH z5bHDj-A(`ah$#!}hBuGr0I!{_zJr$bj-s4-6%ljD#vOtQ3W8Mv{{eDrv8`(M8 zkSKtW#j8e~EGsUSv!*_eNa{1wh^);xU7*S4c|H?ex#y}qKWz^5D=Q^rXa#cM*)sZ4 zK6JK`@@wCiHzu!fvs7+UNri_V(*hw@XaF5UKq#CUCR_Dn+au+L)ZFfj?C_X_T&L1)im~C?Gidv z_sC07T+CWes)BBoquyrEJ9co%9Pf#%xLAS8kJ%D^o5O3J!ePYyZo6%xyY#jtx?hnC zW9_5GBKvCz%EF_~2{ZZ(-pxZ9j_;W?%E_@@fd8dl=7&dx=*vXIBO#zv?|;RmQrO^-4MvyRBP1nGosK~qCPrMx;e~^>N9=9MhJ_!4aM5jn2u`@bJdH$OL-!q zg7)gmYg!C!5ij5BqMm3eku!UHajwefU~|(U0@tQ01aUED+=KJ`$>0owVJ*o5GhZ_S z#wc|*TQ|)2G35>Rl_Y{Swk1imhjMdGYemn412cpKJ~e$Jz7AW6zY0-d4;rvvx|5H3 z+@u&*9VU{?3_bIui>UffDOH*Qg>V0|eq zX4mrjn-etS%gjCULw<)5!R~3pIs4s(2=ol%Tl**Tz77W{mY1tvuJSM|Uxdd-lbbmw z99q8=Zw;1laiEQgzX-UyyS+Z+QG)gje}rd!<@O6{h_?elaGy;%UGNI8fzQe8^XJ$f zVUDR(7LMFJcipSvM99bGG16Z}a`c&?mjO?s_%BvC**KV4)vWh2HmiJZJ$>HZP$yH- zCNpU;{c5c`6LidwqHAb)D)QjFG-p09JhY(%Q|OEp;xq~_juj--)ELsBA1&k^oY2Q ztB90EjxO{mFy?^KN8rkasEh!7hw~O(e2rm_A*%7>BxcV%v>~;gvEj|EAJcw7>f0wy*XmG2=s?1O$Hj>U zpfPobh%ZNvvIIt+ttH|!C~Y}*lyywe`&FO#q-jJm4h0+txE?GPEg^O|`Sn*9>L$m} zrd~%r{y^CMrf%@|xKZ#rHYCYgx#K=nZwmZkxujaUCSX#R9Ud5%Ny%|6t9XCg_FGWo z<}k>&q;D#(HQMhvz5=7g6vIvfLrVs6LxaHmho}SHORuN}Y~6s2WvAs_-)AN;+)XAO4}LRW?N6k4>Wf)rF%X;#So&@GyV6|s4lm@1{ZU}z zv#}oZ(mHtJB!+dhroV*Idi!sgP zXnO%x>DiZ049WtX-&Q|xNKohBBBgco3;=P!q3@X z&KVk! 
ztzPIX*H`q^D|LvrQH4FtfxTJ>i53l_kdji_rXRnHr=C&VBcIDvFPO z8Yml@ZPWT&Q$M=|VXGa|@>@cMODR6aeQrQVs2Cd#I9ND*EdV7!BX8?&Dyiu(7%cQoh7hjfp&M;7nkksv_Q8) zyrD*FGUJvOdb6$h-R!4%R;KPi;5-}5Mw*5?LhW_izftU|iU?)dH zz>$-J3)*SHY2m_|`2lni;bC>8j4;8mSK8aJgLLX5Sv6HE_J9ji#vvW61_|+3Z#rq- z!_Nn(u=^`@C;q_%LtvO7%?oD;f+LQ<3Okmp#VQ)SV9Oa&1wl~D)XSPye7a?8W%g?` zoQ^1az3pB}9JX}B^Fiux;-#9gga~jzMl)M_fRL4GuNM)K%Sf>P1%dCOFdN4LVDp`{ z`hXmt)1MQ|8n9tuM-_c!8rNd2g0@T+{_7IN*6_?BFDhAdtD|=&i#}~_H}PSMi!IgQ z;lobf0hO@(c{Q7qSc;z8X6L3egc(r1qfDG7o5X{CpYS9kD< zP064ElkNFP@MM5<0s!_~F*a{qx5q*rE2rByJ#yCo6Fj{6fdkba4)yapFs*r?Bbr31 zxxs#I!YPhnkGc$B%#&(L0%=NW#aNt%Gw^DkVauV0-mJ>mZv?m>g;p3yi#1$yCM*sh zii_FvUYio2LT*>(1|$r04nPjTh#U}ije1oAeJb?6pRGCdBCAphXiH>GfzyF}VHY=; z@fI}EM}&uPEvINxqQ9u!L3t$0i^G*N^1G>QKsbEvxGD*Xj2X;tUewYA2l}3~ZXhFo z{_W%YtBm+`k-#HK0thAYd1;ywg1i4GrK`oamTg8KAlp56FY%%|Gr!9bl=f{;l) z)AMX+6kl0T45*kUbjxZXAW&07;KM(Ec0i1lrGwW)JZgnWbq=O*h7qwrUjRkf@q3^j zC@I3HS*!=sIlcAGv7RbcuYL3Lf*(`109I-Wq-{%>>DIg2S^*(j*@vz$Ob7-G!Iqk< zx(W)yJ}&5{Q1#XfDKKA`E`^_yhgE%xPG$n$d(oOBx^&P$O8B9TSa-)R=B*-JHnCVZ z^OG=@5z>;(@1ZK}Te$w03S!G3dDMAQ+g#2FBN`ZNTmqcr%Qsh$R~|BOQ=+Sdjv4X1 z0;y=QK?ScPJYb*Dmlh}pc{FfYhE}}U$?Is#@OH-!G`8WJUUjPV&Y>AF471A$Q9}dQ z_trNf-R07a;ygtgkb38OhY7`7*GZA{P*5R3>$U@ge|jUT4c7FNU4D{uT7(+pmq>UE zz}WtI)riB1i)of-tKo#p!il$rTR60sfN9+2IDC$UV(<;Osav9biV{)No3|rs73*}` zyCbT*g^&vCD7ywxh?`1WEH;2ym?AhZQUGTo=+BoT1%ckR9&(zIroa^k^w>Bc{kaEN zpx_qf6q>OO#Y$1Z(qr-K7t{qyZ$==m3bRS+sd9=}P>JdJTkmQb==w>-01*xJ_5ZjM zV`QvwTd#PUuTuKD4DVR`LDb*5`e(-e=TA{u1W<&_7t^73XJ3To@|5v{jndQv$-F;*UN~-#XP*>75@Kjn*dCAChgn6lqT34nBV)) zEdA%rBRMchkKmCnPT8{zZ|lwwq7l_l{@$7g>J2EX^p)d_{QGC$%is~jV19=&y-N`0 zkikn-5kchsroz%a0*`)RPu<35scGTYSCGcD!Obm7-Gk&X2${KlGx%^x+e?KKiv<=#3}VPL@{0bz<>&r zNiazs9ism``O+TAp&Zr(>%9lAU_o!}>Kdev{RT}^cK z&JH`5OL$f4so$45oeqUMq0GsPaANRDJL{q&ge=24p}?cRU26&TKSP}Rik2_hG6mYZ zm~H$(@z=l^mEp4Kf9i*$F-pTtr5a(*HoV+6l=O z-p4yyHQB6V=jw>UQ1O|kU+wH3axt=+bPrG&f%I(Q{k7zG^oF-BO4TB%*a-Q!EnJx& zyA9d?Hir}iC#leS%wzV+7olcj>;O^AAr!8u0GR=A+4_Q?7l~bs0qJmYIhbgc7K($Y 
zgh~xf9TC^gSTz~+g>o`EI7%*Cfr{JsgP=#5;Mw3RFn*?+s}@W2Y>J**cg}6 zz>U7c1fr4tQDEX~QHk{hX#EfJ-#R>5AZdCv3koTRl{{QQ_crNJSe9tPh3kl-(+jYV zc1HvHUkLv0j={`_F@HvJSpos>AB%kb2=ppxRC*c-!49v5Yx%MMDdxYk7RLf+?Hzsi ziC7CMn6@GJtST?0L;W8@)#Yao4Ei^4fBy<7Suok{PRC{QJWf6pTSJ&S-7>wk}{|2?k$?;w7)({pd--L+*2Z5{1$CJWHS!yW^4|}rB(wmk zzYQ5QTN2GlfRLQZ`@DEY5#&Vu<@X$4j2tF+pi$Npox~ZqIVMC26x(|9F`2*T0)-& ztcln?+r?PB?R*`8>4k7#IAci3n8cP*8-s~cN>{qJELK5wWSre1jf7y_FtIB_?YwP- z3ZYYx;QIRr=KcbX_DO7j3GX)ON!$=UJLl_)iUUf>WvQ3C@4L+&3Hpmxda>8FtC~!L}112J9a={I&nV z#27_EgOF4te0_R<>lTfMEsB2|q?Q8+(qlB7Z$bVN;2l3w=teONapbBTb9s=J>CEal zNDU&l61)9xX$Vfg=<0tW2o5aHIy3FYLBAR+Dpr&y@D*d+A~RcZW`R9jH#sst{s-P# z<^iDJV(LJKp?kal815Jg;!>o$qC06@aT9$eia zDk_T2U;3?3A>&feK}V?-;^085s}Y3)*qDwe9Ft)D9Xkpff7w2Gmgi^m06S=MW_}F$ zi&p#xKVyV}n^F{naGW#*5BwAEKhXvUwjOc8ez-81p=TK~F(QA%21x1JuKEChc&oa~ z10|~LLk#iOuLluh7B`PXb}U}lpHpX0R+A`NkiQtUW??*u^+tYgu7ssv?3 zU^U}#4<%;75+5OoVq^X@X|@cItjwoG>e9fv43XG#tLec={9o69ET>W`SU$j?1cZFh z4NL!pgP-C0%gR5tZ%J|n@t8QQ9TNQim<|*eOqT-so+vdJY=vXWoW2tT?I)P$%437| zy_&ba2O{1cp<+Am;;qyk0++Rbf-cxjeFp9#t|9G4@6Xuwo}7AN8^fR{H+vJ`e<0C5 zJkY52RO=xDfVbjkUhiq;w=Qd#YaDs95S?l|N50>>~+NI~9$2`TSuW12yG;oF|2n-&% zrsp$U(DT+`pTs0#HrwP_V2zKv1`!vVV$U)8yY@f`Px2)!gtvnaaKk2S4#a<*EUbmw zU-|=z(qLHow%!}gB4rsm++cwp-e93Pc&A0CfaybzY~4nL*(X0CC*VsE5(JHhG2R5K zu=DyVJAZ{u;|E45;QasTAuB#uBBm`{P)`_s=!W|M=^v~(Koraw`^bLx`nV`;3li;) zs|F@i*}2*;4?4R2YEDj!(IN`+9v24G!#~Kk1>^94Ux&j+)fPAFB{en74Iq%%{0m#Y zK%##4u9v)&{vklVJ_qr4{cRK;Cp;NA&7*hX0ywgmpu1}AxQ&mLLHp~Z`D2K|f%)!C z9shXJ4uvSm{zn*N#N-65 zS(2L7b}`0WSd7EqkAvm#hJ@k6CKK`yHQ^uBIZzB(kKqMrMf8i7ZLU(2ZmijA+fR!t zwVUY*KS0cFp6@NvJGkx%Knpyp1*Ho=F=^yKq7P$hzIlg5!7EA(b+W{R#l`L~Ck9EuM)6qx{_H8t zgP&&jsR_|>z;H`0{nLL9<_Ybx`lI*RMx&UhvJ#+I?5@AGV8yYVueBf5m<^I?Zob)> zc=GX?X(X#`!|9lU{zHtkZ=o#`*g}$Y($OOV*7G$SKBuEHdE2}pjfFZ@kvkK4IlrgB zDN}s}g!(>D5$_B3BbR5dd*Vukcc)5}L9M_q^#@G?zqGOmStMZrRG?M;^mCM6?Hg4f z(~@Vu)Z`3fd{MN6&@>!sE;ulWy>Ec%-U71~6LBARHk1-Hz@f-zeOO~(_nb}epPe|KxRS?QCAZ~!!fw9GHGo4AEE}ykrtY7a8 
zzk0qmCy}Lf?mhrgpH;uUNMVsl-Gh~2zS1*J)LITaxT#|b6#KrJKPRXBPKmeal0hEx>1_csR zDAec)1YLiTa9ND_H(hR!R~(=dGL3Le=}ClPJ;@ex4JSw z1JX}qJnvmI0A;GKZh(3S$rycXr%5*Kc&IYz zXxB5=z=U)sLkEz=#n>3H%^v`tx2C{m2HY}n;w@(=%#)3X__|m>{<9$wnV$aZk>&6l znBK|1;Z4Ot7{hh{c2jzPpSvZ)G6!o#Bc)=qdKfxu8%x$GC`tI9?kcTjtn2vRxC zvCg(egFpZo|Je*kOUUp!pNXRn@rc+5()%geh}tYY90S^LF>TpxfZjk>{}u}5L!|^6 zat2cQ5_G=X+5Q@vt+L+EFDQPt*{^_mGGAfZZ&nRXuP|eT*&ty968SC6Dn&fh_G-aq zQGK7P1^SHt|MB*gVOgj_yC|TDf^;_mN=kQwQi9S*r<8P;gdpABT_W8`cXxMpcgKES z)>_}$XJ6;f`LnOfAFug7J~4C8J@?G0e6IpX#`MwA6g)-^^sml5;K(jgCMSSenboqQ z6WvP_?9jdheLgnCtkC=FzW&cgxVy`R$-{hD!axa)cki17MoZS`%g5Dk=y?h@b4V4OJixc9pB6j{fa?eFtZplW6L>RZ{tjk@r_!Sk zw*TV|fg@WMEz*ZhOZ}R8&lvo4WW;>7)>uzR_Ri>kl1Abvp~~P%m0M81Z2R1_YrIs{ z_~6f-q0A)I?DlXiY7h!mUadUE%450m91OP@?}Im(e7C?wVJ+o#pCgs>j_7pGdN646 zy7dyfATc?p;PRbf|h9#KmZ$H@Ur~LMHm4OM;Sz;B_;rIbts}V z44(lkv5@tb1lUQK1as>Ju=$DPVSYgRvGy8P5=cLwDa5}4B#FlP0b1_K)x&ep_P={V zi}6W6m;cR4lAu8s3Co7<3Glwm3~(Pcc8?)U28}1o%)sOGjroe%OK3l*eeflYNf_G@ zEIg>;?nFYR&Y>jPavvOB4n*q!rcVn13&}cr`htG2XL_+3%%N3qTYto3F%$qTU4~(u zTYZpofNzNF3=9JO^w1Dm!Q5a- z4Q6}8$b5z?_Gu6{*v#gB6W1Qp?AmOIwElbs7yq!tHBfj7Ch9)PK_CMO79e#=7mR}m z2&Wsg-swXM{6O^%4ys8F=l3pm$DKD+VBoH5SB1{L&+0Yk7{Hzf zDXoI^=DP%GbAX&m0XmYlLvH441UPWWoJhf*54_b~?wn}wcq3zL_TOO0Pug!669C&Q z{ncI)yzFyzE?OUH2Ews%LOBRv_+3a`U#bRyxb^A5mj4DrAj_FUO64=3!TC~5>R<0F zN1#-o!m0wA`F0TqTaJMWUt30!fKAe5gPCv~w z`hZu-YC02nveun}FxvOx(X{U2X1C3HGo1QX`R)Q#qTI0ytzlzHmZA*NjgkZ~;<1}a z&Q_U+Of^?qt{Q@#2OA?bryKnlU=VcNegqJTb3}M0iTG8>AAoQA4azS>tmAncnQS2g z{jj|}`Zgf?Bi*2~*V(d^Q+1BkI$>nk16YQ>MxQ^>;)5nhj4{Oe8hf=42nGpjv@s&u zjBn|+33B)5YrkUC<8@-wsfp3De0+MP3gB`6_RrDE(F7m1u%-U+gRpJF@6z(1e-69OA+KI7j%#pQoc5wHL2 z1pstV9uVJ&M(`r%r+fMHM2%-(et9G+n}XgnjGKH~O;AodxjAQcpfYx@0X$CaRf`EB zy2N%V+Qi|*+rvg=)t#sqoDEtZ5iGt{g5+m0VyC|EFj#Jio!9&PXh|ib?rHB<`>b@9 z_Z(@P$_M;riwzG-I*HH|&=I)f-Ws4_^eTK+7f$pV=ovAL_>@L+N#(!AvAYHaVH>>0 zya0XT9C*1iESr|T?*aY1cQ6s}9|Z1pTGw1G_-LB118c1-9|xRp3+8M_Kc9h%vGZA} zniEX5HthcptlS=!$QWA2BSKyG+!o-HzScpW(?BX4R+@iqI~wG+>uff$XhMS47&8Sc 
zSrEEySN;kF0;V@6pOQg56>cVxFpL79jaVQ441kf$*%5sp-ZuJBwAKWPmj&OdZ{-d@ zIRGlB*H7{=@QjYpfzS4DG$?-#&H9p~{zW1X)4*lazrGQ)G<^oXSvuP z%;8qH<;Nz43M(X5^s9hIy4eO{%sa}o>`rHf%$|4#yQ_hw^mRub!@nR^5<1XvN?=L= zX6Y{mdgZ>AT7OKMoqi?qNuUk9SqY$T8%@{8V8J`NbMVs}_|YlB4&-0TnnbZ7IX$je zS0zy42=t?Uy~y}F-N1Z~hQF0KD|y{ahCl)|`}RHIZv$MB9x3$OS6wuS;$Ra+E^2@` zkqf|j-|{g6?Dm&L4w(?_%AW`UQ~?-NR{EVMjU46!XI-bkIhiVyvE24%a$4QEOknSE z$6q%PIV7u1X{NY6IF4Pc2x9fg|B>&`*J(U09U1QN!qyPOXEPZ}IFjx6e1YgcLD2aG z?x_KE`m~OOgAN5gylCq7mqdlI7*Q~Q#gK&lC|m=M3sqEH=iM)la1J3bsRYiMQ2Sd}F%cI`E^hrsIFHWO8?2U^o^FvHW+_qA~C zj?_^|C6f<4bm;JBv_;4q?G`m6m5g$*D_oS}$o55Fwj%QnLEy)0-G)-%FK1p|KR{&E zcFG1%zup(OnGeMGAlPBc)1)PYwXme!g}@4VCI2 z*C*vJw|4a^ebik->;sVoky3ouzX}^}zU#8DMZtQfzod_r4rT5H!x-LVBI{BpT&TvR zyF6TP=7PpD9Sj@0K+}~$@7GJpgB4xkkT38P=Eib>iTzs_O4)nsmDj#@Ud$}Ppouk4 zq39{=q*@eg}fiKT;q>V2M6p7Jv<#ah8|9V3gK&Sk}?}>i= z15~!sm{QGO8o`Gl`eb=GB8W|HbpSS5t1ffQ9PmLmd}mfKAl0|3_a)#GFZyT<03c0o zl*bo$uAG!VbVa9v%T3y=DIUq%YIy1tQOB^`UzZ-hKBbOUw=yrViWAZk>1&_ z2C>I0!|J|*UHozSSw}EcT|DZ3u4)SV@D!~NIM*C+M11e}+kKq{>!rPPWEv{52nL@M zd7UdO{QeF~K%X*=h**B0>{7;$E0a+oZ|U8{^AZlB{l*e4OT=b^!+W!p(Xkv`WCjnr zuQUBUloBJ5Vx#TUF>ZyXu(~B5kOufA(o8C-ULIkk!Xn(nPgZ~=2}DvrA{$5uq1j#x z?^+Gu3JG%FuXqE&aE+kC0N)#o$Wi)I6~LzDCx?yF0kZ2XAYT!vB=Zc`%AY(QWlqSq z(oYZ!qIdGzCx|X0EvVT9o3Q_Naiot=te*-MXK3Lyd!s*kB`|DpQuy0Sx60&5wgLYDEHV5m|-RoH%4V+QbNH)oUVm zCX398HYEL@@Y_CHTb2(eI$0+I{8NYqA%GQlV7c!h9~vEG?;Qb-Xh2>~f`$-LvWxZRhvf7q6e_0@Il-;(zG^G;#=Cu*ATD_&oD1pbIQW zG0Iixl%C5Js&RtO)Dar%@|0b6ooJ$dy3?k_{H|)6OqIZ{o!wt9?tWIv@2dPrQ)fbB zptp)9L0Rp5X-2J9ZJ}hJTb{ygP%;B)b`OdIk?# zs&wgo8?)8>=@gp%m&aUDbsJ*<6j11YWopX+lICns*c+7q!rP#&vO&Jls?~e?C?P?j z(c*xreVw@7_7N~G-DxAv;EdY%wcq|;C8r{%J}8!DC3__h#FiLj322uaI{L{zM)&{g z582U3Q)7i)R{UKCiGd7_YNbhL;p(5oDsy5?N@)RXbzGrlB3`E<5KIuC-+^FGHiggS zj7>3Y614Bz21U&*gY0d-`t5G`x#gOBw;3UPBQam~!}QBXz(DzWbzpLB!?YpzXG4nr zp5^>-adEC{)^H^A)qAySY5uin=_Am%CLPc{39b0kxS$=~CmKo)1){~Zt-D78Ndfw5#8?jivUeMsJsM6^WlU)FqtVN#PfDu+wTF`6ZTdA=){7&85W5Q8x=Xznb&=i}`pwTZ 
zpwFuBsyTR~V+}6O^}&z2%UZz%oPGBWY_jh$Ocv@m`&0NeiEHF?zMO$W`pDW!WnNMS zuvQG{FJq#3ti+?AS?c!aO086!>c|o8K562M2mvgizX0(O&Hu@ z^MnllNj~==E@b-BLj^@T1_b5;SZO5agGt=%pyIt3%%wCMN)^1R9DU??MvyYtUei{f zu9;3rI~?dSXZ~les%%)Lv-hF_1Wq?6Z5<_`HPe=k&;oXQxamxJ8ZNUwPQQs}`)gZZ z>~kdJgw$*8#!a=fHzDy6Fo@ZdhXdS)nIJA7p6D=)0&o)4J+!j?xUz5&3r+(sT!i2O zSjtEWDWcP>z3H-Z&Nxg<6JH5_Fy&Hi5D9miWq_&M@_VFMuQqR))444*bVIc)?>b&HCX>MAN1L z)E}n+_V)2=6}cyYy@+!H1W4H7oF{-xNCyTsu5)!>tU~$-;Lg{|`U$I<0A5vYSa^3P znALRibWD=ax9(^1oDF@@pXfw;`)LS-+hiRNE|>E;20CjGx_&rGQ|^_J4Oqei%z8a( zkfp%VYw!v6RVf6}U`bV*!~OC&V2;)Ul>vIkzwqq9etM^Xf&}nHdD6TI!QL-ZDCc89 z*u9<=Ovil`2)pNa|Gf7f4gls01cNVCGq?~#Ah-h+1_tv%{rQBd(**Z0Y@N+P)6Zx9 zf54Lp=BSNVR|e%Q3i(RIJYzMs#@2tALF2y!V4y@nP(B$l`A0zln?xUU^EDa7f4ngA%>YdJ+B-0IB%79&qfAJbetCAOtW(PbI7+oq=M^Gpen-}(G z@HexD@K3VIzf>4&51@0FB!tT`_l|TZ<@Zh`*#j|67h_xREgmi%*?a!JeY(awE0w=i zS!SQ_Z_c+tK@kRRfcYsHZI%Xs-$9T+!}#7G1u!r90Yd%g|3sMZkiCPZX#S`P;ZwdI z$U8BC2y>rLv;*)dFJnf60G~4Bl8g_o>Sk9I5sHvt*8Jw*V*m|<)wZFZvXq}{N6=TF zGQj9-IR90YBtimAD?XS3DCoZsELaB`y)<>B99EbwYG4OIIK}5qcUP9YwJF2i$dV9T zW4kM|GE$vrx*u;`mj*TMu-I^+?P&)R14Roxa7v;^wnDFW$%Hxg4I zMvv#fPm)nuvpcN6^e$i4HA8^K{ueoGdtY-|t%^A^#+qKNYdZxjR{toeUeUL-Wzc`^%q}r56gF9h)8(c-S=X1AIl=NylJLFF&NMY=M)C*`75EOJJ z#)1BP?Z2c6=$521q2jPT0iM?GcpZi*m<=KnUo0IQ)6t*E$qWi-X=w42)GOHsup9vF z9%=DH_;9n51QI@RD_pzHps6ETKz*PNWet%;Ooo8zad|{0f)R}`X!l1ye|nhVq&FI8Zd#6StD5z z3F$JrcYsetrLRp815K3bR)e{Pt{5{w<2>)zE}$Tpm-($V3%o&&2>=C_1PuH#`8_4A z9$#d}z9)Xo6hn2nHdxqn&?wRIvh4b?o|XOAt;8aw4&$^V2>-knWpD z2N69?wlQi9fy61!*~?Ia2Vf6Y5vtyUw3BJe$r-rnJPp##Xdr!e6wbp2oK^_Ed@)Fi zVAhd`1U*@0$2-!zw@;2fFGdUd$(9qSW z0ipHd3-unq<;t&Q-hnSl-MYSBULnl-eI!^>$_NPX<_*{Ss)DJ*(Pi%oRjUT3YPF&hNrd)2Gk zJU5VZSeL3Yc$Str`|EzZ@hRz`*7_QW~8&_4T5ESTf^>AsJ&|6>5CkgfK6hqTVfvB7x})N*XtPiaWN4N`ma)`Ps2ch&s>RzR|;kz}Gs ztC7WDFG9jx7(kbSRdNlLgqYb%x`6&pPP^$vfMWb`FGbw`sd<}*Bu5fNwL%ubHQusE z&ST~WwCk}#X*7@`&xqh@qt{aVkBSn6xIqznj@K|fsM5=LOM2#x-KtpgN%jklZPOE^e)nHe_zLJ{Zbzl~ zhl$xY0b&q~Nim;L<0R&G57TO?cO)`Y`cQ@z_$43-EvtMdZqXWhDR0S9V;h)izY!G1nlSc4TfTzCMHOfW; 
zAw&17Wd)386GQtC}UoYzlU4HOI*YM_5vLFi-V1qud~lGI@4eM|2T z8j6snwjq=o5Sjp#?Dp791kg8#fo;n9OB{&^v`Z^SeNU305r~rF>=p_Ol%N5H(@9ld z8Nkc>hnW*V2qV`kx3K{5@-+9!^oz9AAADicqyWG`$}0g%PLcXnDfWby%W~F=KSLOU z^`lq$|JFSIWB@`kuQ!6iO+YP2!n>;hlzpEEfgM)Sbehb+GWTRYXEmM(#G+NwBDz_R z(LAHI)E5?D13E?D493q-P~IJ^2#o{!-E)5kl((uHivUaeewjJO0hZ=3w=)2iHp4rD z3rHawy096bc+0X2vFQLyOOnWB`)6qtd6^7PnxQO|5f{Y#8Z51?UpaIpJ8Xa@T`+zy0hnZaRWv@c=b+6HFogSF-AlCuKt zTbz(ASNZ8&#goXxl%Fhin6B0n0RfDiFHokS8EJ9*f@4iQz5%Fy8;oK^Lih3?!zYk|;vC&OL_}6%Z7 zz5G((l3dFCp#^DD1@iSnpyIm*k*ODOb8Cp#rXWq~O5FVo6pS-DesEQNN|T=4(#&H$ zrAce*fBsjxNdg1bLh3GPs|Akmr7bHx{~a*dLx58h2OWDd0Y=+KR3cCUx|r{a4ZM(S z<^bnr;t=|?%1NLq0VphzZ`bwy29~W%r)e&D8=`{{PyuU<#oEJRX##HMb@A!j1TGLt za<&Ou11=G@@SFMvO3JtoFr z2mspzhaimxh@c@kYDQ;`vk8v>DQ*q@4%}IGJmt-^pIhLo0|($l>&bw5u+JamMSIe6 z*aI!c7NIeJAV{i!Wcxe;z+S->#3JC5A#0qOAwAur!|FV)aL9N6*%6-lrv`HgR2qw4 zFSN&My#{Bd1J$DauO$Ty4Ime`;EK@!3W7BRawNF)p6)0DxT9Mz^5V6>q@`s$#lB;K zJ>U|NEd!Tkj4?bO5Qk6q`N>FUehKfQg;1@LU5pH{|B_sQc@+0!DFzH=^$U+J+dkZX ze|-$17v*a3$a;WC2Keu`e3XGvgro0Z2UX{*t7|@rz_ z5kbQLq&rYA+a$nycMwk_e3=QMJaU(PHNt^&(R!--Qm zA|(&9vkZ>Ou7`pU8-9Oa+K2pfX8(U1000;EnkNV-;9^Cth>#J1U=vdFFC^GbxK(lu z`AxOsW#H@Qei9|nKZ(h38a!H`)Ri7czq$JJ#3L{TT#AN2#kwfx^5*VB#dOAc>k zW7Lp+hj9mzPKH3zanlHZyetTYw(Cv@Bpf;b`bh=!zYOTdAAUfiP4>Wj3=(l^fUg?? zh@A?gI^;nlSF_}SaJKJo%ja-J!h5ykX#@0a-xuosKF}{%x7&e9Kv@tX8vw^#kob{`>1VxeT z<_l949fr;KK!qV@t^n*H*Jh%%PNKHk)HbWuqJ)Sk)1^$Ge-V8HF^}U3IYfAP0uq+P zoRwOgfkskJL`Gy`E5;o@REh3IsXvoHn+#~L+sa6KH6gZmY4rg+3Vo8I?Tzk)g! z2SKMZ3D7r#>_J1l+)##ECjEJZ@nrA-@8w+ag`a$J>^6wHhNRxhX131E(T7Y@;Q(ny(>g$Z&oX*K zI1{^rDdAeX{p`Kz!K{_H{xT1D*M#TjENM%P1fFkgZ%Fuk9Bk&#< zE6p-MCmEx|aoAkL64FRs(J!KRK^+8cs%++2tDO(&q??$-3#|xp6VG*8wxrr8Q(wV8 za{UN=nEIO%d~X>EA5Hb?FFeJgy{DM#=4BH9CGl{_tZ2@|b&S87bmBmFT)gv&n65}3 zay-zqYog!YEXnjw!fmysbW%!%-9_dYLihZrEHlusd*J@lJ~ZqCcmBa~kVatoWUbg3 z3v83tOQgj^0#2J)Hn+xPCgVXB8g*HP+r<7o!vQ9E*a``MBq9z0?yCJv{>QO{*(+w1 z+C^}0@`E?LI}BbXa#)5}nqLQ99861qa*TA+8D5Kl#A)=FXK7kOQ)>L%XWMSuO+Wb! 
zI}g-t&bAn-(_M0?(_9wC*J=lVAkZ-3LVa_onZMlN#oyf6i>fNn7m=I=4$#84Aq3gx zY-ZW{Gq>~W3e<8~aEnw|4<<#~uv6LF^1%d;0sh!sQd#AVkBJ%eK3wl9$_;1|$`3iX zD-Pp?D>iA`D-NgsqTk(aF0XbP?H=&fn7*CLZt!#yw%Yi?!MTm}w}N~4fmj7mp43EQt#O1V~UE8`R3)T)iX(J#rv*Z=iOMAorQ5h(>9^mv# zT3cqkcmaR210dcQ{}0IULl*6p=f`8eXA$vOQmYnSXDbl{wn1W11gJ42rz|Tsfox-9 zlPE+faz!PFzR3H5U zFm4D;ACTTvw;~TMI;?_kF_iY;b#TUeT(<9tq>=|JBabF>#tTi?>$k@h-!NRaQpiO2 z=odw9ZR$#^EkXYO#u*9WkF#JEo=bSlL&ED@Oid~KIZIKDPaJD6ewxkK46k)ZoEKhmF9SgZ4pSfZVVy1w z(V9qS1;54-@}{iJRpUr ze=1+=*nx7kaD&Ay#8*5rhG3Cv;4klU)%IxYv$+7!i0Az#Z{>A-jVMGiqTBo zs8I+0q3`&{#r1lx>|n!u?!a5@1luq+#X==vdfr=jQ>8};9|F zV$#1nd@68$*!p96mJQq?Dn0LHoH~i#u(>5PT}5O4ak-D@WY!{4c~?|j3SCjVkM&0> zH=X0x{>8SN)M%YO_K>Riz{szR+LoDevq^uIMJIGjIyLe=r8>-rWVg7DhEt8Y{tq)r z1@|2L?cL}$GQV62E4R_C=DG(E`xB%O8!jf(hNZw78Xt?}_}m=%_HP{X7;W!aofi`2 z|4ewG za#$<}ck4w))ApUuM-vtne^P4@UMG*?w1x;Zdll7vNa??7iq+|xJQ=5vsS3OAw=<5; z&&>5qVZ2N-v%wh)_^zQaxovbdS+dHKsX#2cIe&h3FMp8^Z<)eu)FgLS&&E&J%KyX9 zYSaFmd$c&EgA%%BBc~&E{r!yO>~)If@{gL+hGd&#z2U>z1qVmGem{#tgGtNr_;I77 z*f$$#MObdaWW;!82i)^G413VuEk1w9$r&!;4;dw7ue7&WC_a{wUjH6tI(Th1Zhy8i zIyg$tuSZV7fX7yS17A}iq58)`X3v>omB2L$B>ncT7{A+!N)4CC&PpLW`_lcf*|>Lj zeDJi9*wkPg^czbR9__G^8a#3?cYWpqM&GiFa=p)Q(5+3#RnObdK0j6@;QR4QBk6&y z{@CuSp6$}I`C9L{)Mc>Eb~~%C-Szm10?S1^Rk`VHaB*eKc&;#SjbV!5daoB@fZ}qr zbnz|o;0VQ{OAhk=d2G%qnkwPg$Iij}OTK!7Wo2FAeV>76s*6I1^gRwl7)EBjt{PHO zeTl-x6>Dk}Bbj#ZYb0*DWrIrf@_kG$`IS$35uL4T=t#=gey80#*c$p=$QMZJ@QYyH za{swctsQRiY5Nm9OK)_O7Qa4FWqP@N$v?EptUt1$zPy-&r%>?eUUlIkc9dQ88W2Og zCB;a*T7Yq~9VS(E-lyI*>>f)#)O+MRsk)%uuMkcd`Q>=(H?1rYP!luaSv4oQ*&gP; zdP+PwB3HIK)gkd^|8Tv^(Cu;8Ue3A)fA#O2_-8nl(*Sy-{Olsok@a>$mc*WJXEqcsO6$$YHA<#B82#xg$QqLi}KT;PW!IPLXg@&n>I-C zB2=xh$xS|f6U_^<4yiy1I(o5uM7K}6X@Ur-7#YnHo%S|a15IKzZr6ee%5!o$EL(^)lHEit+4-@a<~P#)PWZ3<%y>?Be!I_ON|Q=f8xP- z9yWlYvV$wl4<9YR2)s=rdEEKA7y`ryIS70lwemdEbcu4!xFn%G5P9oMSnJf@jsXr9~Z1^EL})8f}FpHFw5Z!r#E9$nI49axvFJ_2tca`yn4MY^(= zpG*Dc73}pbu3It=NVa+YA}FGbH3Muxf1=SN$*1xY@Q1UF3;&AbGgY-WE|QaBtRYc+ 
zj{j&sZyU342~HswBy2V~%*;R(`)>a1tp38Mf+cnFgN>xK0AazHxdSfZ2vK}e#C~F^ z{_W*qMuNG;MXKyxs$hoG*;K@bHfopK%l*dD3}mE?&x}N=CB7yjU`kbq;&L(HN`cbF z_~hACMyoE<(gU9QeCP|L8qHpLYdfV&ibpyEXRDBM(d{`3 zMfv!=6@S#Ln3M#q7qGYaK(f)-tCmhHwZb76t(ITu5h+j zer~1N87@ZkG!4Oxf_&rC5QL8&!|(_%%pFT9FewwRdSvJL1DC2=Y>u&+VS~Ye?86NgJw-TWN{*64^t00w=5!QvvB)5V?+V$j^siY}f1GwR#h*y8 zxQQC_rXr{hbFIQH=yx)*u{~D*6tQ>Y5Xl>Ay{zMi?3pabcJ-m>*?0YMraIk{tme`0XXT~okAbFSpMa_&ah)y@_CHnW_ zA<;v+&%9%Azpx(=O@)|!ca9hxHJrcP{e8GU(C`@KsK=v_r||w8?Endf-ipN8z;2n) z&c5A2T@^*e_E^V3sA&?dP&2Wcij?{-pO&$1W)HRqzaVR#!Z7mTD3jCt5S`i<_O_a_ zOU8|@(OM|nhd)wDH$FS8f8S=(O$?|f&B(S`~NwTg~1ejY{1Q=2a)LTqSE z+5ay7_~7hMM9AmOryPFlh@RW%Cxw*7RgovkMWA@-Egjmf`hi6|NoEBIvVRvW21J;^ zYKri8?4Dy!W)IKT%;=x4niys*#C?dUXhd$OMm9=sJ@{N0FcnxrhQmSM=LL(b?W|3PrD)(|e&)`Sk_y7kT zd?Okvyd42|uu>?Nd2ZhQOV^wGM+D;mTvF~}B8kSksvUKuXK6aJDYdzlh_}Wb?cVjt z&OfmVl*tmc8@oN44V>nLgZ^lIo7KvTzREbgGBELqn?P4OyZKQ>Brh^*_f|#GIIqA4 zkDG6U)%tS~9?Np!fz846vYY5~iClLR7dE2?8W#LXQAcNRe>L=yAgFW5r9W0jb@GS-l~>sAWd?&ts3#iA&c&nEb|K8#q>jNgwpbTx@4`S>GUDij`B)8>( zbs*`Qd{X${r=8fxJJmYb-UbMo|GuYq7@qwdAWGTg@w?J=7B6S3sGmiFmL9Qxh0Ya9 z7o#$T8M&jh-O$_xlq4CP(*~#&`AbVyTG<$y{(-eSWip=Q<@I0Sl6Bhb+kbRmp_ zlrNUllj>q+PATRT34>{y=d?BHupPWe1~4q8pK&`k`x8WeETWh%(~eG<2rvNyTMrCu zGsplLd`Tg$wt68r5Z}vhwfq;?iy0l%;B0m|=t23NMtRv&>tV?6FO(?cDY6(05X|G- z2~}{+|I&Yjvh;d3QN|UtX}-B^>rY^p`w0Up7s`DkailX|yi`+8ZnHY+oKYA9L2k;i)Tb3{7i0 zH#c}@(r$RbWId;x-lnD7aZ;t{vN;MhQr3H~Du%fKh7{pjqejgqO|x+hr2-KJC#(Eu zWtFa_9E8Yl6nKp+-X_8`CBqJn&y8m3wTE@=A2~W&xUja9GVZnSB=(ob7I<%8m-(6| zAKl^fgtG=HV;{bFO#LfyTPxH-Emz5+kf$7Fwb3J}%Kj*#>C!-($vr@B5CeNWeN6w$ zZ%pZ>@*frhsQZY9;nE7AzTkN6Ma8mzj{D|Zp`woyGf#f()_~vNV~Wn+E|a_5r*Ohb#rgMmZD)P? 
zL1)Sbr11i`+-S}Q4Ac42plDZI>KBBkeG&3`c6+aB-w=N1?uLiebk;?Snpjb2M2bLx z-+x!$f~+K+blkwatl;e+$pK3(OqlnRx3|j**TUr~Rgm6WE}^`?-^)YcOFB3Zsxne8S)~G~WRp%wjI4u~IHX%Y0o+x5 z_U{5A{Bw(Y3G8g9^CJ5-=dW@DDk6SMTWl8cjy(*xz9DTcow1LIpW(GCOyo2?omkvH z&C>LFJs5|uUrN%zV#`%;@^{kAZhl+^Cf7s7t~sgvXms$y4Q|?f_0FFc=R!NA`#@%M zwGlqJy*!!Y{Tf?>vP+^~gWti>cWE1D-#p5e1zL^hz2_`Fw_W`7tDxZ8@BGoOnWok5 z){)Ve2Y+D=${9swXH=rw@$?2MG@N3}2>hov4@OIO&WAs`+UeCr=bFvco(dt&t2Giu z4Lxk!uY&f z4w6Ur%$w`}nm#qFzS8N*y|4eaPTx4v zgNQFwqj9ok;c~qb`pIzs80jDcr1{VqK%uU`c<1g2(C{0H z$>g!I=ERbfW;o`^<*<*A?E%>Aw>l+w{0{4UK`jz!r^He1>p|Bcjo5~{@7qs6eUIF$ z&-+FgSR~OC1b78UC&G%_YM^4<5D>!~MnK%gbWB_y1m}4NXZX;{L7$-;386M64sdSy z4F@*hI*u_8XhxXo>+Tr&$G~4xQTht>>50oBj|^tC5uM0Y)AlP??mSY-MKVfz-7S3| z{XsteZyAZih0ayv{+fy49TZ|aC-lPyCIro(6QFJzoko=(5=pSBR{`pcAZ6m2>|VA& z1QpqgrPS|26{T;@Y*wX`Qe!>O%y>yDrV#%rOlPb97uKhj^}o2mCiQVd85`$lCthMKUMvtZ^CIaQYsKxNhJy%NK z;Z*)ntYh_UPkaw$?C(#>u}}n1QJ2y~cF@lwC(%Ju;0z$2GZs+h;ka-tP8Q3HeHL~r zV1)~7N$lwMY!|ouR`h+YK(nZNYe?1I5_GF!uZViusIxo2LV<>Gz85rBpn?9pe#f@f zRPg+aOzXSzcF%|NO--A#rH#jx$I` z6;9-KPWEQ#5R99*_;E%_E&p*5Zocsl>e?P2#Suz_0EM8*HK746@O~V&Z*4M?GHE!+ zVpxd9OsBdlhf9_VWX3EwOv57mU2ZP54rR-`mBqBjK{eUogB7rz0@zyss`)Yw#BF3BneH3PvI#663Ve$JVZ1 zba(ai5?|?4W*Rq$m_6KYeiY?2nmaX>nz4)nw-g8~WE?#L%9`GD~_tA0?8k4GhVOkwC(vTQ^6ZA4uHn2Ia zw;-X4iia}t!ekU#tc}huhv0C>#XCo#ND5xs;@jFcl{!*E=eiXy{`AUz{M?ZEu16x( z+xiywoKXJ9n}}e%*l%AV{b1*GSAE<$3*(6Lvdi~tWUsI^8ib=T_QI0-X`0yZi&MJUdc}cJ{>Y))mbvVw#@RhgT zXCXv-;mZZ$q)GXMx_b@YY-I&{FR|pS&O%iw?EtI*Q3e?LKTCbyuVXn=WyqYO&V}h1 z3JBX4N?tSU%hX~y4~{#cRTU+rQ*bh>2Ok-UBl5o|yNGD=epO5<$!m=}^!rUkftsIl zMIOa&Q|^)v(cgOVSD>+bo>nV1e#TsWq0@(0+o&ivYG>>|H<6DYeNCXIFey`!D|uEh zGQLOt{3(+CO&+$BxUbD4&QC|9(8)`jY}$lCiD+$eb-P3c6nb(7{Y2KL_9W^%5aKNN zLL^d~9ysm!YVxPbaM**(W{U*Q*v!Pr^SYC^ktQ{@C;6FsB&q`{NgGfK1>8Q1-GzhF4A^PfGU)%v@9Zmg+CC%`6_xHQc@k1a>R-W>#i9FU%-u)Tr zEU2MpWMov~Dt1reaU2m3`y-tBa64(-1biP)LnaLDBM=t0@j-R8u`dj4M(D(D+rqWOV{}`n$rY%W(x9pytn4l-&E{QO)$_)zF{v|{M444@3AT(JW?u5 zQ{ezrq)!*9=fe`z{7Sc#%;;K2ROOpegIyYxg#D)%@C&u}F@F@oXGrAE?Dw_jUewwj 
zeD+ym>xw=5wmr)nda?H)@;oR2Puwejyi)%Zn|o_<4u5~cKN?G*B{Zyt^e_a@O?v-3 zBJ3&2!g^i%=(d90kwz4PB<@UINuk!^+G=rZVs#3d$N<*%OyA4Qugnc}cL!V( zzXsCLRgnz4p+A}Q_Bc*JQI@*S3Ej{&r){~uA59peZqQ7>PqAJk$jSP76FX;mF6Oks zseXNUz}&qp%Orhd*|1wXc_MDZ2t5)o5M^^PBs~y5}l7 zBU*ZB4kqQ*{edS-$*hG;$f|w6?&`Ob8&66}%RARccD!jGch|p5Nls5#(S>R1CXtNu-$AHvgbC#t0h@%kUE7OdQW02f zL#nuelm2&ceT7N!cB$ydaUrudr&;d`>ifgvy)O^PVj31ozP5$<6H0gmU}L!+T&;#Q z{D?SEA$x?26ekpa3dVcMFzsfZo{X>KCRPC=d!gd9(cYKUO%1s_w_DYS zQ3@6(XVVz$5}Gvc6By%^Ux>fXzt?|!v(wk)p(gU;@|T6u@_)SmGD-uZW}_TnD68X@ zkGHu^YpCQBF{M$oDrC_d`UZC;c2pOu3)~Tlez+0N<(9%%;$aJ_;#~Oj61*U%eGC1{ zkz;C+8tEk|1FlemRy0SB-F{z2MIJoM?RN`?^(dtD zx-WjC;>gqZBT6vw-w+*%&@gnch_`Y%$~RiEaa`t=QiuFwDD>$icz2)V4-XY zYl{f~+CK7O9*lhcW~7`31Ium-_Wc-HwTO*~^HVVm-h6L_ZPOu*=K4iAIdGplR&vIL z7;k7Bf z|Kc!WpKvf(xr=)vd}-m!vmt`QN?mI@?Se@qlN86LN|d1oN$nw=gEbx~y^|WDYGATJ&Xoom+!}ZFav3|Fy#Er1(|au2<|&>EwZn8gEse zgVfw1sDR1Ao=)8 zeihwN4&y}EYTi84L76b7lDPW0LFDc4W<(tBW7Tj~cDqh3${Ne>3rSoSIVQGys%83! zj*hr)V@rfRyAj}=8k;>0FD2ez8dTXHbh`Jl20o+HArk@dJb{AH4pdGnPWA5_hRi4{X0$J^2) z_s^*L{%VS*N_`V;HzM4~o0H3)q7134@A_4XhkP(Nl^=OO?^Ix<3ez;bVS4OR;#Oh# z-Wrg_D5IUNZ!E_oZGkSAiG+(oy`w#G$1%!Qt#u$2kHe{uIbTQC>=%~zy?q8|ZnG!w zAlS5)vbYEdNv+BeRu|@nAkFRGcG=h;M`z2FWaz-$---&$UkPPU`{vH-vQ!xQNJCOM zK79^1c{FdFpPWMDtY*C5-8SN4R=x)UcGH-8Cp{Q1pB;Zj%?^RFa>o@{LwO6}^?LHn z|4Lw{sx%h_{t=ih^EJm8VW-y^Uy)!d_?V^g!zgxw_NZv_F||wO{-e#`VB6nf1yWv` zcE3%2o(tVmIuBp2lb`k9^_P>nhttMScU>N_MrR!+Jhr=BF@lvN2zPc3Xmi%l^^L|u%a(2f z3&i0NPil_GY*nP6@2l3TV@&euzpVdC*?2T-d3Qg#I96bz8&&KP~EE!zL$=`FaTdc1Jq8Cp`LB?W;&LRy+ZO6e|%K~g}vbLcJsNl8hO z?(Q7A8>G9t?)m-iecvxIYr#6_?7g3Ci`0DzhqnLSuNfa*Zhv4Lt)TpqMtUaECEgnX zep9vf=AIj|8x=kC!ZXHrncGy#DGf+;^)?OSu{Vv^3Fv)z^B6Ri$8%?xTz;U9Sz#s< z_Rzh2n%Mm=teB1=Q1w)$J8(rW*vh$nF_8$uj(BJr!gY>etZSrdwK&$v!~1OH_6K;d zLxj6I^)I^}Qoz>|e#U%1OSKi>7+vyCmOF7-FqM}LO1kWm$a%B~7ob*K%){sJxf61P zx0dU^GhR}^W*b;2GyVyaA@u!0=Zb90W7qWeHR(jE3YdpFi*!49 zNiyvK(5$rf2ni)B4rFI)DljmJmEg6+`dN=5Y6RutEH*NQJu>VZZr$%d7^Q^CjL`@x zR-&eox4UqXk)2Mw1s};xZB=b7l(U-Bx>##pAMN5QO}!? 
zZ|_1gpK$i#juX`bRuFHHTMv*>8RjGh0q4 z+?0RU2;&?6)2hF=+!K(Tr^6(Zs^?@t!=v*+%sUZQz_K8_Y6xRSP)`0HbP;OTci7FV zbRtkW^YGJnoy^Kn2{-jc{9`8d)+=)IKgp+o20Vrj1UXX(gi?yBpDV<=eHbK#Mif62 zGA7BUwR%Ik19;3LsYREIArmKn@@E3r?Oz~**j+#xlw7`?=*7OenMprCbl^vY);b@~ zSGiwWq{}sa48>I)I3}Y>0toV7I~!KdPmk61Fov*jBBhgde{_XR_9R6#!K~yq32yk$ zmq|7)wrhUc;}UC1*jBS;zHQlkL*nb#YcArTA1IZ--?0m$prr#*ZQo35lvZE|On{J* z$jEu-@!^5g3E+`ZeXfD7(Fm1Lpg?s&>KlVt5z#j)npFK-{c z0d6Rs%S_?B1IYktGCdd5wVk^XNjkvOgw|jYGbUZ7i+D9$HNwKko%~n2_j3UyeCYgZ zYu_t0uDKZO2n!OJMiX_JtlwW06A-E2wchWL^M0HMsSC0soi0H!y|Zg&K<0__mUbr8 zIWG7agh3^;c$)RXVVS+N;0iT;nc$exBrCEM$b$K|PNpY>yq+CJ1M5e!B%YN41h;wwMF!yAw_JAyG^a~QB!+sR)hECpQSAmto^NLh=BP6pNe^fW;q!K+;$($eRQclD2)>ALlDV)w(Y1iaNzc{x5;MSGHXFQyS$v*(tC?! z#64Z23&`M%`qCm@t4wff0;mtEObHG#w*(Q920o9~t_+B6SE=Gi&;Z0>`7+qfe#=tO z3v2|MXGB?*Lv(X$g*-oPT#al*5PjAbJlpOOqm1ZYslXu?nf|I5=pE*woUcHcK6w)M z6y-!akJj_nyA1u2G*7g>E6PsNR9{+5YkzIw8z2l;uzXuw`G&7-LqTx#RizJ*|1fbB zdHmj!3x4PYr~?1N2zrEyo6DYhli=kq&Th9e2(}2g+CRLK%M`CL8DyA_6B4FoTrP0C$?VE)Uc+R@M7$^E9{@`H&_Ib(TpDr3*I}xFks8oF4X2IcPX^+g4r_#Mhjk(fi>pqM z)APJ{0zKC7&m7e4#m&FO4bn>(>ze`sP`*Eg@cgc=tyTTn%g}RCzsj~sFUvQV11OoZ4tL7#@juVfb7dymPS2 zWrf}F85wPN0OxXd3y~zN%CJW>87WBc>QK4J&A4@7G4Z}xm#|arSJ8c%7Nc82DSZW6 zFp5e&W(HR49*Ayf%Aka+rdD%jN>e{~JKaY?{_AcAw;|?GYV{}CxV6Jt^R+PkpS$@A zDN>dHwk^JargeU<&c-!+-4jn0FG4qPj2VKNIaT{j&sML|)pFBXj(CjZC7yuM|8P4n z;s`H~N;6$;N;UUK9>RNxFwGoIGH=GQo^J_CcXM^6T-mv_a1Gz18c3oSW&Y!{5E2p` z^`+KYg|$?Q*40r0Ld2}Ec5buO^b76t>R9fdZBAJs0m}*iILq?GCE#T^;Rv zr~0F2)h~wphIG^#YRBV_t@O0b;`MRvcX#+xj;Wdayz;JcXDUrzdQWjQo@?E0Zb-7$ z!8Jb@P3lowzN~YKUzTDOhWmJ*Q{JOV9Usof&OlnKjYJ7gtAlHDopQEcl&wx+5=I<~{=~JK%~jW~ zmr^hE9?_7RN<1jbiKu7S`O5atTA6}%ewx;+NYFNaGDeWbn83zvTLIvp*r_IGCpdPU7o zPc2+^49V_Xe$cN~Uv}ylp8fRIjDp)8!%Jh#^Sj;o%onxRG~6r9RL-!l1c<5I2(bNN z#5Ksz?;Z0PK5^Gv+mx2D_-u53N~nLYc$CKdi;~LiN_*WV?u;jO)az;qnT!JhAMnxH zGyI(1ESL0ZL~A(lTcD}yzPt|4hv_r4u?f&GQSy?wq^B*f);E%T|%l=fEwCL3@!(P2lKHN+=nEUcO;%G9q zg+)i=M!ok^u(r_kI-S-v&j;`Bq7yH`01eZxdibDTtxXU5=suZVPCK6vm&m5P>Dx}b 
zFk>X89Q5`;-kSgHSC59vzkl>a>J=yn%v#WRCc(C%q?uBG4EyVPlmBjR*!E}DfH^(t zAbGNlC9TDks7}jKSbHBjL8~F4xthaE7nXMV?OYd9E==qZlHf*i?5b%(t=aA41m2y3HYpw9wSH%gs%OgeVHc15hib$3l;h<#+Vch@!m!_8{ zVuq->+`MCesM^F_Ds+V)lMd2Z2U|~N)(113U0=^+t4%ZuHUIkKSwTs|T%zU2i5!Ls zQHmgpc0OvK@Z$i%*dPTorZO%MA`c1}P+Y*+GDhfdvhJ3-8at%&MpwgDTpMZhHK8b| z!)_CQTpNs<74ZO|II(gBTyeufR5I_hl|#+o6)TkB9X3vtV=D_P&kU&1}1(lqi@zRD-qhE;g>~& z5NM$2dc+XwponL}y3Cab_^t)O z0d94S%Yp+)@LV9Q@}u_fpk^sZ3wn#qKo7S8l~ui(3@k=e$fUkb)IpBUL8C%ddKvaT z;sZ!W>qUPFhDe;#cUvyt`y8n;WE|s z;*w+#JUi-%{zVO{CHK9U;^-CXO<$UBVTx_JMEp|<$nt|{_Ux*!X^*wT+6 z(V&g?1riot1=AOwz2#vef+S6*+#hfG@2*k(x*ruo+wn6VN>4Cl4XM2JSBvW;FzTH^ zr`}Rj>j^bt>?=(u+0ee4@~lBmbsdU<7;z%muHN@t8tf|8?56K8>Heh0+2IOtwqE#~ z%cRo89{DSiIi~HM`}tO>zZ zs@qGz!{2SwTY_D0M)CLNvl1cn)>7P?-tTKq$oU5nN@)ESYOb?PSyRlPk|(xR4mMne z{+6p*+d?!YzjmKKju}mkX=)xxD7^lD$CglR?KYBeAFJcOzoJQ>yjz36CcZfEhe1Ui zS22RjY!+|Ib1?&;G(?|mW49dbb~sPIe33hnx7k8;`8a3q@9M3Lg|!QqSD%l+bQd|q z3LQ^IJuqmBtWnS|0YvI&U;3x}nau&FRp&>9TVcqkZKIaSD^=ga_;Qg@sjnM*!f)1}jvbF@8c|}h)f+#~Z?;bakgZt-o+~|; zU12D^eP-t^cYBJxI1k0r(aqY+E&Q11&Rsv2nNJ1bD^M_N@ZL51tFMFNlBHNAfJ?sE zVi$L`(jskBemi%Rg>;G9Ch>UE^Yl-IonOdy%vC1!$b_eG;mKy^@b?WgxPodk^2Va* z=k>4P7EN}S?vw?`5}u)|z=J!(5@5b?SuqQdy2qx#WSDeu?`JWGd@40OBNo^gRS zftv3@|4Vjtb?s5QuQkD67Y$Wjn^d-brB+{1okdMwNN2y~dw*l3_YG9yQxq`4>%0e@ zF)2iJJl>Jrpf#Uu8kCGPf17Ye;VVd?h>FC>LApJcsm>BC)o)gqYF-F|cb~>VEPu)& zQU+GA{q5zkMv|D2h0lFPLb&_w78`w{OIGUSK_|_rT#Y8m&FF#SHF_S^+}&6{63U;o z9F9a?0|YK`UtQagVC(`E>FlF>rB~7`x0U?=ah@lzQx*-V*juF4lli}2ic6ldueG0R zU#;{(+F@D(J|ZbggB&g1UjNW7xH)x=ymvaSntG;GDZUi-ZEsl- z74{s=iacxNQ{}?yCT}sFB~3%)z%lIV9Bvy7V_UD*X`-?piFE%1 zEljQ&l)vk!Hjm?G9=$(US~2xNo+D#YPuY$TYk7F8a}GojWj4% zF<#;lNaN(BWn}Z=cp#cza^gKUHi0kC%+?t*Nc%KFr_lFT{IHPjMthI9@%vBHODZZ# z$}2<$lKms;o*DnC^NlEPpkm(2h^$Wxd zZd(73;q1e*-^OI9-`Oz-63Q}tnLsi=7=+)H1l~|#s6uf2p?Im zvfD9wP*V>;G|=Ssgp!a>G(kdZnB(WYP)`UtP)}4BhDA7_G|-1sMi`%=yNkC;JWJnJ z*G}}^RSME%48}r@e>n?5H;0E)X3~IAfDoe%?P~YHj&8X&KYxKOq}Bnx-t|dl1fkLS zlAKy54Po*kVN4o&>f=ph5$V)lp!`Kh=`}Qqt`}W_opT^R>~+(;R6hU} 
z;}M=`fxl6z>O(@MMUOnOC4%&IPBD|xfE@!ta!5U5>Y!JgD(vE)3pvy3kO?okJbJla zHxmSi^GCX?B=mxIkg&_0_BHaK0)&bHo^$H;=ga2>p4D%eH1Q&!je4z$Uga!V9gbm0tzP`LYLZFUqt<29+>igoQZxO z+@udxiSP&Ai-C)K3=!>3Y{)HYfpz_dD6?W{ZL76^u9eP>L{QgL1T)`&KAI>A!8g;~ zh%8Y+`LSsGtW8)CzTeRgkyi!S85>?fp4(13w;4eO?8``qM;jzm;qs{eycB4>A1N~x z_)bQO725ikOOe~X(c6hKh1gB)l;z)NQz@_TCAFazN1TCisjQtOmyjjiVa>D8W19)a zuY)?s1SW6kP)^g6fVmtP$Ec_S{Opuy9GAwdwqQ1HELE_FYlb)}isgq8f$6V8?MeB5 zr~p-*Uv6WWO!d_1PPR{!!}GdLs_A%uhKyg-g%KUnH>gxf5piAs_tV<{<5VPq{2*ni zu6T);b6;r4SKOmL7>U6B^IdreuN9k+8;S4in938Gyc#u0r#p>F*521Ios|4U$dpE` z(gyE|oC;kjW@T_^;P|-k`VI0%#E}_c(D6t1_s=9KmW;*ct{L3|=teo%Dc#4PWP~&S zg4C?4GyHEOsZ}_T-gs2tsscY(JjB?_QoQgr+X!`^D!tAj`bw=emJ4HgFOkBKri9HK zxBJETREdFq!}Z?RBojR(t~s9B^6Nfu=xp4>TGwX*M7#oevdVc+HF-RvjrG7_yp4&- z`^_It6DLxiF`6DGOe|DtbNl8H`7f%!ugKgS^7RBskDqlnRKSL|HsgOX=|yaOL&tpb z=jCIH+8Wk>Ta@qMB3iVfWaIx%YXm^^Y^L*CZgf{ttw#gOF`P~(1~NXFf#p+HL2nw6 z35D!$1oy`EZC2jU`4?89Ya6<=!#8|`^L9_*4kX>GTP~d84Yo5`eoU4#MvrA?W-gVM zyDIOLlz8y=F>kI1`(?QsD{*-Z{AWuHiFjPejS(cuLnGct(M zXYlhVret08S-k(1y@&hqspv6fB3W2^_hLEcV*3$@q&*bU-qkZzxm`!w+@0etAVKLf-D>Ol8HFz&nRA@F z-S=Pmywe#2bK3p^`AZDuY>nRS;?^|c4HD)?4|vO^ZkOaSo4pU41ijpPcS7Yg*8twU zS=GZY5dg8JaX@~E`~|2-;`cP!s+dYj4>TmW#S1AOyXeHt3*0%=dJ7@CfZHa2-sGYh z`iw7DWwyXJU+MHAi`A|_?%C^|Ub?p3`Kf6z!C`G8e|aSnTL<_-B3W#$U9HAmTi^3X zN%^{wx%DCit4;ritV-co#`vqA7_X-beq}09z%Axh{??(6$}*ChB%h0?&yC04d=tTS z0}5u*R@8pAIh4|UFIZnf?8o|N5T-&zs@TLf+gN&$TCkLN zu7Z$NEyhCq?CQgH`qhrlx_GT<0CSo=yA8!5X1XhC<6b?nfQ;7RM_Y-py{Z z;XTK)T6~fEo@*Cg>h? 
z;!gpydUO*1D49l97R{(6|5#(V@y0jOsTd1t0Jurfv(L(fFOy1sG5s-iL)tP=jeIT6 zE{x^dzbPE?;$)LVf3?=`9&`FbS*zGUrSPpxAl6vbC3!%oUH~#wi%-*fPbEiyXdr<= z?DG%R#P_O3Xhlul_fbEoM;6#(rR`(t17>z+G?Uz7$KFO{TUBDR-(Y%?ba7(EC26*a zzV9f662k2i4X_xM!2QzQkIku-_)hJRTglo8%Zfo8C}JyJ=k}|Z9UCrx zqGV@CDT|rU)gg51DweA&1z<&FAR3gw8xN!eIxw>8at*0RI#5`|hK$2>q10$E6R}9Y zgFnXhqDJ&XGPvRmS%P=f+*r^FI0pRNgmSUdJ093l)zKZ^_c+G`?RZ-}$?8g|x-=O= zHRonE4MjAinVB?^r&=O#P@X=!`QcopG-0**813OpWIT2xX+zwev><+BBY|n^0*8<> z2c!_+hIT>l7wTD;O%BDuz9-)yk~~Z>T2}*78%44&dY%|Zuo$dDPCsXr25nkiD$|P< z`98QBC+UUy>Z|BpqT{p`ICpE_#ho|-*QfIScp|`cOlOyiQ%wl*3 z{cI#eN=cJVx^HkYKV;a9<%8Yh$-diRfL07Wg?gG#D1EPKW$l5d<25-ual0IEDYX!F zOX!$c8$4vi&%0flVRL&MD$I`{uohfTU|b2thvI_ll5V9$kqGFgRd!|`9@nqG%7X}+ zKI312J!`>2Q5y+nC#EA02W`7PU%ECuc;X^zk2JiZNB@eN-P)&*-7;%ykb(c zj4?Ns(7@$*F>g=Zk-*G9=qmq=-)khO4QHL*v?>I3eWjj)!8#3b>1;|J)%5r zu&5e~Y(2(KbmQBkr3@;jcVb!Mbo7_ZM_m`vrz8DVagOEoY+$N6{k94m5)-6d6PNA? znRE0!_Z~{zuKcJbTo#zp)nDqj(C`;C$BZgP`+0ZA&9p~LQ%S&t`=lwBjzIlz{qWUn z5`YRs-|LY#Ax&hecbbm&9|lW!HkaYXH=lR_a_T)n8_pJTlYvCNaPN6C)Da4PF{61v zsa(ocX_=?1UAP%z9nWf`Q292r<9M-2{_)*DjEnv1GQtkz{ffQ@pFL}kU7$tN=eeZ9 zz4|*;J&E5?@E}ByT*=n_y{U%lBe8PAnrg37Z1knm!kE8w;TPF!@HzkT(C5dGXrmst zudZe72)eUU_8-eWiM^&ieoFdF$k^od;6C;wd_-Mfc{KB*$tzW`oNl?rDRIwRUp$_S zls9I;OIsg6g(Z6sTdH~n{%VDrlFh+hRG(Qx zM?|li2CIa86}OiiBPcEYRd}Du{ca4?W>cmMRyw7nwkut`tX=WRAz&OCeUFI0EGfNK?FeF}rG)y?p@Bmgd+YN9-p{X1@(r z%7ufN5wv@zrrH*B)_TVnF)+toTm{p_zv*IV-oRix5_AgN3l7pIIwVXCKJ1SEzAZ~_ zIGx;o0(DP?Yv1&<(+7obCA%j)!``=^hiv934*?_;8=S*VYn(nLdp;*%aixz zh0WnQO5TRuk#3w0y1j>+F;omvn%NdZ7N~Y!I~%`MECq5SW9o=}hm^WNLD;jNr3CMX z(zT=2VIRazKwsef6xF2lf4y17mrF%KaqycveqDICCTI2MkSiF5p7|@GAR4)zS#cWcVf7F@Le^7*k zI+SFRnd!@~mn@Iz)Foyw1r@78g=DBJ6X_}GBTJxf=DjPje`!SwGP1Yz1w?51%Ysne z3MLEn!r=^ZOjo%2?G2s35b#1Z{ZuJ^4t#y`R!K=c$!~~QW`XWkFLKX`~pXUv~`EvhV&LbQkl+}NIKcKi^4WQ;cuwlgZj|g20p22 za7>Dun-_etK;doEdSPlz7rRZRX0V0N9Z8Cj$;2hR1BsNafzXH!GLzhCOQ&ZN|W$y-u1uz|O=BZFt9Vpt$ULgk6 z|Lo;2fZJlkep(2@JfNM3bxuxxD>BCFu 
zel>0F0T>>@yHG3*RFbsY>^pkJL)+`>`0{xXHWB#zF%X#FGHNLz~>xLS^lV_Fwmr`(p*pJUGdd7G{`{ITIn68tF-S&sp= zT@)4Uzw8oK$jqEuJnX;DLfU8>k2q6TZmk?`Wp|#%kH`hl44{imK7by-9x4H`UZ;`h ztELhX0jARw`;_(3g|~LlXs+o$c6_64zWWwwI(SLcV?I-EO20bG^*b8+<;tk(sku>L zuhBWE0s&xyTJo=mwu$rUzL+#FrTm2wD_F&1VR71fH48boFxB`~jhm&7#l$e9@|+Nj z#lu^jo0v=F2)_@rXy=djt*7P_mc7=81%`2Vd8hr6No1nIUm3f@zrJ%*&jibznrPbE zR*LVJI9`%?ReH`J^#5dt&XCH4C0-TdaORP#>(p2mfj2iUnG{v4zkVOnIOC}4H#gO| ze#V~u@k!LslbbcAl&i`&+5RR3_O}2_RR8x@YD){>sCwrs{ z=gZ?yPWv-I;^^m=^f+%LKWe;X$<$0n1vT35(Ci0OdW2iC>bR*|8hAC+)9Ug8$FS{6 z_TMVjp7LSEYYq3aUx!W7SsLL3|1>>5kq@ZTa`gMBMBgXkMC4X_P#!k6CDIqFM574} zoVZ`@8%3;%KZ_jKxso==9MQ8Lu1z(5GwP!4^n|6XGlWi|d^bZIW46iH^bZvMzpyef z(OyTn%Msc<(J|~;Y}}J5ELIcllC8^bB((G)?TC}$I_AgFJJrH1b%}<2N=Thozd(v8 zt<+@jfoohc#TG;8rfpqtaYWt+&IXq@hBwamf768w2*;Q>L(US*(8fc$7;r&2?pzFA zD>&1F?<_O+@G#O%%Lk<5(C16_mOOcGL`7avzotb>H*s1b_#A3+jHMJ%+yEVlye;$&e z?xdF{)8hIHI%smfFAgk44K~I8>6E&8?TyNsy_Ap`8h7V+!`jH{OD32TS$qx^sxbO` zgVLi@r-hR6PBSPdikjg;D$0i(a?|?!C{AR-lY1{0oQVmwdS7|hFwnxI_Dv-bY${Nt z;g9o(j_W0T=&?&vsv%FVJ)L`157R$zADyn|goS6ELA;Q?0 zZ=_I}>a=%6|I^rMrKGmLL43|?%TnM9;*|A%dVkU^)9~WQZXDB~AQ{ej1ky7zwEe{& zjftJyI~R%XL0GT%|M&>5))E~|N|sId7Jfg4dtVgyyY$D@pMQH@8PbTI8#hhfA^vwK`}+w)YOlj5U5;omhfj z!THZt>}K6xx{us(t@cR?U;a1VhLzw>Mi`gLX!P(ke!3bnQVEc1m zVA_CwWpZyK1)*3G`a8j~;OLyp=)8eo8h=iTFda~F*;;A31o|u}j8F6ZN8Y*K6D;t^}|r*>71f6d_@m*7Apk zGp;nwkNlklnFKLJkO8jB3Bm(F3iBkT`ro)K!}?KyWCo*>5+Wgs>FC_B1R@!EXbA9- zzc5pqt$1DQf!lh8!a3UHjN6EkO4+ZFxdXSsnb@EYp!6X`uWuGwo@%{qW&PqY=3?7K zvL4XPkRm33-v-3456AFLH|A~4x5^4{^g~uk3biOkeqz3^x$lM5F zP`yqlW=xylkL35UB!^&xw(hD_{h2$Nb;4s(er-b$V${@<1uog96Y9h|k1{V{_V;|l z^Lze#EikI^oM@b&#>rjc{ok?qav)C;C58ZHg>DCrGl%g#x@J;8ngcibyFstdCWT#h zs8AJkc12io_;si#oN&|Tw=^oDL@JjNklh|Wxt)mj0kA+1Ug9XR!jq)d-$IQ!`ADox z`OHyHC7zWs77%!=<(DfM{{@^Sd4c)3k6uD@-6B@@7RKPd^hZN*c6sQ3v1X-o5-4ydK7bJ1zdOpd6*TFJ_XEFfs6mI{g}! 
z+$P7qA`CQ8(9RQ;DZQ2RqXQV$0Gaz}lfuKP80)7!(3^tjf?bT?Rak4G)KQ+;3dKX^qqpyLIJjS* z%=-Qzb{w+|@+z6{?h5<2aAPZe`QS97@*tQt>)$6piX>b)GjT*_@U4}2wPAYV((7$5 zCin4*&x0gB&jl}VsnKuVvYfhvE9(ca?hr{PMx~8qW2wKFCu*4Fk}V2+c0D0v#SJ0; z=DCfc+VFw$o(}W^iKPz}+8b8f$mEAO^ zPA`IL@E^5cggps@89lingE(_FCHK!axf6&o9(y}^qa#}L&t*3QG8!>BYJF1z2eK2^ zffy<#$!|@4`QG`#=N(ePs+S_RD0)IZv5Yy2BCKMaj1lbQB9aX&S{~mnB=x{M$S>bp z)QJ2H58$P2eR#lf)3n}*$1;Vt8gE%Uw4{dr^$0myY>~xU-=0X*GGj;F>=NvBc2I{k z_g z`%~E#>I14rX>+?|SK@|4u<2|QLQ%8w3B@DVWFd<>3#M|}Xay0b)qWSeOcfD- zpkVL%OU)avpUBcg!$~8u9BP7V^<6JHdD$%9DG_t_qkfVCns^oq42Po8Gmro|gW`ST z7fwp;Qg^2x#!*+V7D(=^YFm0F>p@`Nn=0(h{#(JFfDJa>E2|9!i?u)X`UPeR04#KrujtJXyiyqIH zm-VTv?hrHWF2AnY?O=I7-eGJ@fowQ%e}%E)@eloVvlYKr=T+xl6sT-czO{0eXEtZ? z*&AG^Wp?9$xZ7=!%RQCgBK?vIhOpQ1`=po zx3LUcRHc}3W*C2uJ>0Nu*%C#khoZCu&vh=OGmV+6r0_sI9qD;BU1P-06@COpLQ+EQ z@QqFhepGE&X4eb9Fr^(41Ee+Mq+yZc8bHnJml+ydC_cLyS*XypI1?U>Ujo&Q1{(@W z${tF5Pu=v9{TZ~#L_MlBMA6Y@X8e0ShXiU>hBW{!hzAkI`&PXBQ(nN*zXsgws9G{} zJo!YGc`4I<@41}ls77$wUO}TNnrz~ugWc>~Y9>gE;eHQuw({1`7n5256q{3>^FRfp zJQZ+Q54MUs4Kl2q3j5kHV8~VfvD5*bv~8e+66Wd(e?CLtekCoeR76VbG&BGz%|&p# z@CrQ&XnSYs9P>wF&!=?fI+Nx&&<6cQ<#Gg*zpS5i{2nL=l80VzSAO^e^yvPa7T zUsC`|owRvDadv)wz8;;pgGn$*aS+QO7!ArOoU9srUZ=m9<8|(BHj~+-IQE-v8>VfB(?q2I{N!f_%fvqg1Vxfc zjO35C7fQ9BAPTuA8uaR z28UL4lg?T?2HZ9=y_&aWNpFp(?G<2ll9r6h{6|evt&E9}i*$W7zw+X3$TlUvvupNB zF7X35V70rs?n$*V|9%44p{nfN{8X#sSuV?`(K6yd;~#9EWR@?#ywDIc27$`-JLj=s zDUr5)J~L5E+-6%J^D9;Yverow}FB_VY`N0$p52W3#d6*!X**NxUhWtaJiBnIV%W(?^GN_F0OQqRRa-_BXeS)*i zlKgr85dP_+a$biSx9h>nBc7(G1!#Mm+3;A|35LLM%V!$c3zRRJu~zdf;deM=_peAF z@}#3VWZrcsd2_WMb*warecxLgmIh`aP!dSu1ViUjNarv01u_3*&EdEvOBQHBB%xYz zDP^%(k|>x33m;Hw{5bz}YGTAIiuvIc*uw6=O8$l-WJ%=T_K2Y!au=0+$k9qUfEG^^@;sK zL&I#1H3Cv*t9C~X2NIa*9uc^0HfAiLpv`{HW@(qZCOEG7&voNkYyg`fMDeFfr$mIX zGYjP~c77~(SK9>B#TNF;5uP_dJ*{d>)YfwRXm{POa2F&6M)+NQKuft^Yp8d=_?B%f zRftgxnr&zgOM#!1WLDTZBiix>8P_oIO4ksM?CsW@r7Np^@c>&`k>l@P2rZF?RFfOs zp8w8_S}|Rpl>Ih7`a%QUp>}VjstymD7$@lBNY1hoDiE^}F74|FsxWlWG|dL*;P6I7 
z629f!K~DfH5o^&$pbX%#@7ea?XoK1E(UmasN&kX8ikKvrH=A^84ubl>meuud{m^3) z+<5qGw*YXiTeZ4O=O-*hyN2-3W#4S1bvZDL^70}Oa{bqxg$y92x;~jypvp^muENAE zeu5~B7)}AwLj4n#Ks%N2--&DI4n>Z}CJd;jU!0Kv%G}=WNufm+K*i#jQ?Gvv#wP@p zkhZ&uXwT86wp0O_r-~{#u9%R5D{7Z5;YC68 zFQfKr151E|>D#ImRTRHE+S#eCpeAAghU=f~06%SOLm^gLloeK@89}WJ&TkVvJO4_F zTt@Cg41sg34DOTv)@+}B$0uW|pD?&>?S_lQ#YVU)MH@Y%A@`9Jq^r+Jmi(SqAgTQr z$2Je4i#Ce)!J!~!l*j#<2@HWClNfUiH$_eU(RmN*A}tsBml=j{L=W-;b-$HvBjkg% zfjVi+M9{TJE$O&=J}M5)xPk<|+VwJNo((@AW~9Gn-thhdECc1lmbS^w$*ILk`W5FA*4?2@vJYjJedmt@Syv z*VVIA!pi)Pu}si|#9WR{D8UcA?xqO6ec`~c2ZB-!K7ssD(2(kGvsL-giThJ z0gmYd3}QSrX16rC59Dh>+F#kHQp!V$cGr4qg+Pt2&TM?Y;m%Cd|9}(YNDVv-r9AIhh#=nR2nUU=KLWm zCs?JBlfG`|FEd;R^sApGQj*l6FGN{aYn-!cz>XZVJoFL$ntJ?S(Nw|5=KEX0T=M=|)|p~XbcgdU5M8>E zn^e;Z*#Ax`_npDX&Rad3rH0SdO|L_ns`u3LshS%xS}A+GUZ|S;cKxV?&0#x-412PE zS9ku8FXZy!It=-e`{?Kb_XGZI0-c^r+r78NMZ-+ipP!i=y)(o^_1nUesgA6_uYKYX zUUd70A)GPo2DnyBTSJ*bcE|$1skU%H{VD~ciNuQLbnx3L{@*;*&95Mri(dGuG4@0+ zkc0DYs4exs)Fsyy{Qo3OIvksVGr1i+1AUSfM*#HN$GlN0`aFJ-tAYO7TgW*`ss?*i zyKk@-tbYC-0)UpIdCfcHpV3{B@*>b#qb6scBE}hbu(yH@t!HKOE~q{S*Lu_ah?wII zAFnG<|1UDC>{*_QE0%b~k+=H@H}e>f+fL8Rrf`ph3aC>uwm$m`aF2n%z?Ln?T5?km z{VOG6u%rPBR0T$sauN0}9nZLTy zbjacWx(V*-E}om4-%a(`c~vSfRbXcOzH&q7rWJ=Up8Dv?=9=kH_Se`IWb3)C%>3!nfJ1M6-0aY3@SIjntPt zP)|v$FN}C0t+ZC2}`dm?MNJK2X z-8Wu$Y+tt++28CUoBw(&X#W71|CN(_S7AL9RmTF+>oU;(TznN1t%wue=R zvZ91-exqjMwh8ooH{~gjD-}U<3~mV98x_It!>6fTwwu79`OHQjaHGs46}Bn^TD0n6 zH=eOs!T&6~JtD2z=v=No(${2vJTvJe69GYwQkW{TUPX2-oKwAxLz|!)&mGb#7py^Er5F%g&l%orJ&UG zAD&9uIoc8_VM_0qnEcVS`UjB?pp#Z?c1hz4whzUM*HQ9bNC2{imR`%sKif>h^WlE) zNOetgIPW511F^~S96kP&STfpfy6q2!s45)qQiH{`u}uXKIG${qcBV4`aEOwG9AOQT zI1ztgI?~pu2iS2W;(J-sqXA&B&!XvPJEhWDXH`RN5*T%gUqAgr#DG|m6=w?b3i9_Uwark6ap0{`)t)s~(rH84O{%oK8gk|o8(1cy((CbA?; z^8DFN03rohE3?7dH2kjdOnH;tuy=)d402eAAm&pfi(pLmY#$Na%uLKODZN>uB%^Jb z6u^*UaflaOQjCIL>F4kNB3NF{jSzT&!~FP1l!KMGWm>gFXFy4A|{#2I$0ei}*~P%ak|R~kJ6rJAfn1mOONsT+qtpG^G931+ottBG~l!EYP+d0FJu^cbq;Pr 
zemNp@1Xw*(Q&7U+M)o-7x`<5T{R+b%=pD$K*qw~()Uj$glkERfTK!7XN_C%sMRuG#%t&HzCEMLbc13H%{8Ur%^^g(ylL_O z&~y%dbwB>wuVve=Wm~6OZp+KI?UR>Q%Ps4qEgOp`W7)QC`#$^JzwbYA&Z%=A@0YIY zc`;W$RO%r+AHGr4SCd9NRHMr?pI2S>Le33_IA;doGsjE{m;_8(!G5xLT%*9-ZaiXr zWg{o`Yda}JhZ_;j>F790)A#SIxbuP041&g%0v5xWY7e9T)v)LDKoxDs2CjXQ2q$02ns$@+8cYnGBkJoNq;(?;b?6{?Zr{1f&Y4_J7 z=Z#_eeJDk&lUJZT%@amZX0*wK9F#jLnQ$z%Wd9_nBGySEP55l0u0Yil>+T~oT%V+l z0H5RX=hyI~b!KkD0%OB|7VD2ty&4V+k^jj-*>sC3rRyX=fl}<|9aS3WDx_<`LeF1e zc5|a6+^3h9Fb$J|Tj{Exp^(#vAdSE2)2f~3=mMr)-ULtKC_n*+dIE-JWWy|W*E=V&!*CE)lJH4E^5?)IHLQFT1r!(J!I2OgLTFBvL;S|aetn`pa_4ajWmaVU z-ymGs!w4m~T+dEi>h?srN3SB`Fu})YO%0X0PC8RW2&FCf= zOEqY{(kn^U^&xiVQDYy3a@5aQ9@`|wsA*syTvORD5vMf(y<2$g)a1cpE@nJYo^w@j z^x0Hw6Xap@#Y=eis$K>emqRIN70O`(**th1etLqd41OqCjJhHzZV-&|js|hKF3hR}J41gK%`HTo!Umsdveiq=|fBbUe$H)T46ArDJ+<`NTS zTeJnZ&x3A1?c31`0|hT&J7yu44Q(28wJsbw10G zE2OezGw8zmKron8+52FiW8kxpXCR-A8svtY|x$FqQO&eUO^QIT-(VY+uPkhu8T1?>}l!$@{hg#dmDif zz-zP(V!G%rWrzF&A-4ZgGxUCeW(?igWK#Nl)%jSc`A#Wc*BxSE4ft>91mFHlif(Of zVW(g2*gW8M+L6X5NWblI>dFt>RIG5A#9U&0P|3%moV91;Vxs8WHw2R+BNl|nxcckG zBr+^Vs>ZJim%N7TdwKNnbGFMmi%vK|$Op9dEE_@jOG^>``mek!Y53m_Sv~J( z1KD&6O*U}~O+I)9@MDqa_rgHEceZ_t!W}z>994JgVZ&4UU@GT}DWpO%ub%ScdKS~~ z5g57xAg(>2ETj@_cmHt1ooj1sviQ$1QIm9$DfpFI7Bby(o}IBqjn~ z8TLDO8G%<1!Y!ekFu&kk1XNaLgRW6ksRc`f(I>TjnWzmLW=oYnq<3B1`QOY1l*q)S zPe~plv4guqO6+%gS(F+b*9v3G=BF`EFvl)?(C=!?1#7<^BIkS+>`%(#JB^wtbYvC^ ze9F?)EMs#hw$U@|#O_!OL}LI3*L-U{*chkCkqGvgCdf&JJbPVk9*}HTX+OcDU7Snt zf|-yyib&thzV-f0_P9#>XK{#gZR#5XJ8~JDRRvKg?FDr`@PpD3_+X@$7;}oiUhm`@ z#lI;+tq0L}-R&LFQv9dB1<#+aSX$~9WGQS1pE?TF;o2ggdWHBiv#f(t(YgIs^6K{M znLdOV?^1{H8ZQpBw|z<``gZhD7=W4pt^a?Rpa$oc<$tC;-2>XL_B}j+_OCdT5jVW_ zA6v4t|7Xej#_yGWHM8H`vw$LY^s6ddSl!~Z4YYPPdvh_|StW~vdlQu*cYH@NK@QzH z=3o;03MS{Qske{Yp3tdONVlGLw#&U3QOtzwZv594C^%yc19gl-20L6fW3y+hblXU! 
zj3Y>QR(n&LH#^eU5GiLC_5KJX^R_73mFg|swsbKjA^?~uZ=BUXf=#=14@-nL;#eC~ z&0*l0iAW3uDpfw-A5P1nbGZ*k;(n|)H4<+_mI=ldQU(DcYFTTiMh@HsL2!kk=Eh-@ zipF%6I^6!yP;UOl%ijOWP(_B;cbn4tG%{yy9Z15w<=2P-dTFd}WClbyL@Punf$6_e z_#GXSc#+iHm~=_S52G9=aGwrA;V?h-(G}{%KeI%Y`>7{`U4_n+j{JcEb!DiyNzg#c zLz6pVW1@)ULlX#MNVdu;=?D{h$Vi==mJqNKK()ke1EKf5OkQ7 z50?G`MGa(27(rS8C+C1F?hA6ZG=_alg*i$7!FtpL^$opl-#DwQZ4mC0ZT0aIRs#h! zd#5$j5FV4doEhocMhFHE9Ti@+fZZHRH{oB}<_!vlYLh}O_&?AGVOEe{WISjcp^&MX za2VNPRtiMsj0-XXg_J{aa5eQHADHzsMEDM#Q+(q6R#73vH;tqzRgaK19jY_B^EVg{ z1@V)#SKmL`;ygsQxOWOI_w8ccNrUebzj6Ebzm_?(fHZ#SCSL5c2pQq)qz$OPe_Wtk z>^Xrf8$~uFaui67D)57>#*i5f2H(0o^aZeDi@E25 z(>V*bC;ri48*U-705Rx;#pN4U{Bg4nd7|M*%1kN~c-rXpZcyYB5aXOmE7r3WWIN_p zS^4^OOLM~lKNAPN# z+#w=Zpv!{!n;+SXX%RVu-?i(s2ibOF-U0q6L40eq*m{QX6|O+fp7n30F^`c4#= zA+-QyF}=4&88w8&oyumIZ@Bt2y(^DSi*{uRvCqqJ(uqF`PiAu!4UFd7sJVZFzIyO* zlvK>%b)|1r%P4|SN4T@BL&jTeLyq|b8@qJM(e}m;)s4=8Z7~WChhGKgsmrUcm8y!x2WBY zC;4O)aP1LRsk&C7raAT2r&nM6KVy6JlwR22r!Iv@SCzy|1GK~<2($(Q4R_sLdpQ(h zJ>*Y^nBLifr>#am-AcS1%tlv5am+$rtOpya+E%j$Ze~~%`_Y?)`9gjn(6}Qy7yg#i zu1z-J?+pi%1hF}@TXnm`($L)HI?f6xSD!+WaZhlGq#orVIm^FzAPwO2)1PxiEw(Sm zeDL-poPt?g7IXLaA}YJOSX?R`@3IssI||nUi+ME^*?_x{6!!l+%qWX|KXp~lHU658 zZ<4=Q29$OQ^coR*pAQPwMDuZp~pacJl|(Z5N6rFBuYy-QknS=YE@EHddzn2Next26_w- zzb`(IFjiZ#|-Bf2zLU_NL1hPVRQ&>=1RcmSLpRWgEx3 zK(32p2#ynlJ8OIj`n6uRRK#6?Ovp>gEl{VNLE39(J#Uix+{~x?DbIx3cDIv~DW0bF z!foIIkJ)C$bsyaJ?di4{Jf8~qp{F(DOW%Ljl%DWwkZZaF!T)I!!BwSpm1F#sU^&&r zg3agfOoUjHzGHKVs_P~og#|K!f=XdjDc$~_(&|7|>K?f(34O62fF}zzuwi%v_SF)L zCUj5;-yWk;Z~?}Dp-CwD=1p`l=pvy4AW648kW7MPXl1St5{SP_8w$0vcPw8z3ibNB z6T$>lgXHQoYqfT)qH!lIhV{e-lX%dC90_#xoZzdxz(7Gk3~dLK1(u%`|7~`Pl(N4- zX*fgoBnU@B&}?J3PabzQgSf>i!GmAai8{Yul_!DER>oJKo0#O`kz0B^DVgys_&&|v zR723Udq~ZUR$kh?8rA4&=1r#a|DJsJ=+Xxe4r-MK{h33bB}^Y)C!tD{#!wfMn>mS3-0uDDxM~c%aC}#Fc`{N!-j7410Vzmj3|79Nm@j`rE57n4$ zmN;OKZRFnA9c~$`^Oi7^hMF3)0x4-*z?d>_5TFFNHj>8KW5XJ+e}S=wy!ea!lfCiz zR(bJ@;DZL=GWv7{6q<7l4Bsd6$9qxR++0*l7la3RW8`-S(g%38o}i$hmwmcJTdV8{ 
zl7&+kO$-YwGPpU;ZwJktq#pPoVM1}K`@3hWt#%HeflKC34ccZd&;pu#ZfJWF^;AT- zzT+AsIg`70~QATyHwdso_LQEK8sS&i`vIVGJn&lWb;-hFW)J8e$FmQq!L?r3BpTP9R~@bi0O@Nj{#5 zbR%A9hU|Y081tKMpd@n9Gh3X}$Yu)Neaz0pZ<0ZS=gX*c~rbYFL3OQjA}n z%gw;HWtfaEkdw!o_dB#=@wCy)ROzdp%35t$sFBA>2I!OTzWqe-`rm{fx=J;ger1p2 z=fdLhX^c8-?R5yMEsY~xaBi*Ai101c9z$Q7?|2)^p+F9C2GaMAD}~P^q*tl_87(|P zEScnN7ZwoeSOIh(YSBMOmgt$+DD&;zc*oRA<@UkbXe*@K!yA+24c47PIW)vJk%^U; z9plQ~fGuEX>;b{&Zp=oBO<(=hagRz@0{fq8*$Vzb*=4h?^pf5kEDV>QeG1M%pYQ-d z3Qen9+=uoHEb)dGo}w?KWUnf)bUifzd&okMr&uk0py$(*bCpaLgFt)ra$6q(pf@f! zd6D-oJuNg`m43TKHdt67eTelRsRRN8gXt~eF6Aq#mL~|zFEJ~efDjuGo@VW4oF1T_ zsxQKoY}DiIeHO};c#nuyKSr5$>I$}^7lOuZt;ADx3K!w6rU?PRONbCS{R->ng)sDwsKdxnVA2{i9v3K z8XEueB+~={v)oK0eD5bOPjvji3m?%keDR@*D2x;DW5@a2&q61r=u)x|mKT@DJW3Z}99>KpHb!W@}^TmVkK)J^-P z*+ESBdJc|ejZ@C=VW|}iHM+@b{B>H$5<3DKPH!0sxm)UV4M)uS8+aw1>#I7#ws(S9 zysZ!LI0ADBLyz4rc7;TJ7QHyVbeEhEwsH*c&FJYRZW()}#`n ze3XPl$;lAgXQ`AsYw;l^rsR6LOvKk@KKVc9Wxsamzxm1e zWNzl~0VlDMl$fD%N{T;#Hh|n$#c08>iFp}`Vk1O@>`-qRqQ+$xU8DW-7b%QU(l~9` zr*3K2^C1w1~FhwPI8 z)1kD03*r$2%#}?N=k&eEh*}L0TFEQQh+MGbvxHb%btgZ%+!tuuia_jZRKro?6Ok-p|ZJ;%ee zBr(K~@j{H?gJcZasbjgIkdWlaGr)4nK{(~NS3u9!NAa*_UrN)WdK|~Kf|Cy#&uwbU zHNNXvoMzwa9qeAGP;$mGVJmd%`rB~3KeYK$q821v6re?vuh99?i9wX&E&fVkukNqe zfUjgnB0sn==O_YSE={iaC+~S?4)+Wr=x`PGUel+o;QGUUtu@|x$!2MZoQnfs;^rrN zb1*-fjZsSRzY$S8zOcf?0#gS_yRKImH`BxU(3Dq8A8>YOz34A^07gQyc&y4~TICdW z#pL^DuY}UCSaPMI!Gld=`8BHJU1}LV?*mT?k#xBAVtSpt8CqJmWKjOKV*9h>*pTn; z`86xiDuk&%YIjl}FUu5=j*m>YOzsHlYKV<&dMSq%7i*`!-pkp0p_;iHaU>~?Xg zHJTstyv~|au3tgnbvUUi17U2kZuwArShE@bj<2#8$RYo{OxtnnL+Nh6LU}`W zgGRP2cc0nXQYj0jES3Lr)<$}2z?ymM#TxHSke8+! 
zYnI1)O5U~03E9ehszSTy$4CdBT!Y@~>SwRh#G<9|Qj}V&9tG;)n~ukaB0BV}DaYbP z)6+XGxojoH5wqv9bFmk?@q0oWOQV%-+mgjA>}%~+q*4`3Glimx$+f&!gq`1pW-K6B z&H>^(JI3Z~(hzD@96;sU!=mM*)-$_i#&xH95Fz&A%UtT`$BfyV*I;SoXu?VX26DK2 z>knP6Rj7g|)+@I@HUV|FzD#8Olkcy!kM!P6ielWyOUpuoyJQ*Z14m=?W$p1lz>-^6 zdj0D(v+S;K`YKgXUhlW6Y8I1r*P~l;0XJ=QOOY=tfdL0nI=2u_927o*qV}b2?=5xR zR|k3qi7v4QE9-H-Rmf*0F`AZovN8O9y%*!J95i^I;au(!a;ym8ziCnxS|@(wz6u{* zm!HSa9=#HAI;X7=YT%2T&F0N1Pv{bnSeK0hn*Pc%+W*iX7kfuw z!Zcg2 zo*oBIQJAyucP+6w);|7q;nlCkuU5Jb+duYb>(0QkVX>D>tOXEVJ!QX;;RqV@OCKEOQaUVLna+&xxcU6j(^!4eW*`t(h1<) zarKK2kK-=_ink?_tDDyIjMRQ?m-aU6nw4vbgXZST!9iF3wTV|Nf=RpyiL3Tp?|0$* z8MP6dXuYBJ{{7FNad<>!XiBMbu7oXVir@8R$?Ke z-G1tYiIcoDC%8B1vhR(EUs8X?CcNE1v6(ds(#?4qcHN;V#{!U1E%r%cMoR0Z)%glW z48Y{A$pyc=)pf?u^~X`LV>%Ne85x_o^n*6gr14IO3=2+nP04!P5b!zUv+&_+nqbDh zr{W>XOcco(W}f7gaK1qj!PE%COAJx+z3FSi|Gw-bD&NF^>(^F4hxe?Cy1UR8v>C8w zpPFOCt@qVcsR}WMF@>V(iYlaPai+ly{X3O*U(B4LJ-|jR?!ACTE|QRs1ea*xRUguK zt>@Sb*XrYXwPTj0M$kVSW)+cG&EW7$+b}=cb>-(LI>aWt&n47B-_b~PdnF)Prb$?Y zOn#?7K&o0X3QRtwTjSr6h#E?P*6f);evY<6B5HmOYqR^IBEvXo?+7*2F5)=9Wc0OiD%H*}2#g z)SHQxY|>;`{z%%tBacwWxLR$O-CRY?;g-V`{PgkgrU!P!4miq#ErnZ}i~H!=h0@l! 
zDKt{H)L^Fv8+zx3SFmQS_qRg@a6Y{Wfw~ z*KJI6x{777frq|n6$9(!eH2#~?Pq^_4{^2x-79{)6+{U-ZSgp+XZMD%Adkah-_A?R zDt|s+zduv2lIylX1eC#E`X6t4F4nwmrkeO2LxVBXCG8!5$%)CRzI%Ssx%D4y(tK@; zxMhjlQRN`I51-7@)_WG_6zwR5P~jz7$g4H!xHwiP>BJhP3}H;lUk3PM2q)6c>2Y0kjUv5A8(| zxLVNR&6a`C%`W;Z;Rt5jLEB%;^ZJXYW$0)~VrMl{fMY(Ec|BrEBO~^a zV79`6oJ9CRv{-U@toF5$%ty1~NMm=fBrgoNK)?JnF|qJ3GdtJ>zxAobaw^c6V$j1K zYZicIhA>H@%ZG9qB6s03N}~VkVuZ7lTNx6f{%}ZNW??L5fxcEg+7+7@riByyr%wDj zVnQ!n>xonyfO*tCfB3$LwbenuGajG>f)dkvZ@xLj)1_?jccy4=k>+k-JY2f8d^`)x zylu0qoZaimo$v+eUi>Do{L8JZ#9asSy&NQ6_EqU32?KX0|J z@veR~d@0{q>1%vgu$>iqLYVRh-=%xNo|2+A-kVN`%_9Mq)q$I3^D)IC?e2JmJ^4WE zb&BSIm#%>vqS&&_6_g;I(ZjlE6exqnZcX1@(|33zoR$2*u+2M(QFkO$EXO_Dr*Vf} zzmuivjPm64-maaquBD^cTY=T^FF?~Pm<3m%v4TL1TT8Q}eWDk|#U?~_UN0ct3N*4g zrsnWH%@8Hinw+l8Ai!{hID{Iv_miwUVeya6_W1$7tUDsV_wpaErY8FxMRd2`+U>f{ zYdhB)EA+qQek6{Xd02JT>u_0J4O`B^xY5Z^{@ff#sQWgYJ&s@XlMzO;JFm>qmQ~Bc zX;*u=aJ>Cz&FVS#xWQD*k9uN4oefL;8P8y9+$^rU&)yD8WqT7DjV-!QYpX`SPTX}{ zOU)%3{Eugu?|^Rcw8xjW)L+gC-LdoNXM!qo<=PIQ%(t_$uhbI7bE!GmY-;oXGe~?_ zwpP8$U)quFtTPt9dWExdlBX^Yo}hICyN>vPY{_#D)+Hn6#E>h+>_je%o9~lXF=qLFVGFO-oF!R=49w=*VXqYG1nc+7)iUmgY%;ESf zI>93=^K^3gxckErq}2QUg2^1nNFs-^H75CUyzLbrBN*eo~vH+;JnE$JSN^F z!;R%*L^r18AL}lh(Uur8zqmQ?6`-2-j~6d;LMDyj6wXKiM@ZlgB)nREMhYi5;J09s zcrf94&nAX^4dOa@jl(CJ<<+&o5dU58>VW3!A8Xau1#+4DN=???LS~Xoi`i_kdI~zj zfN^kP>QEiCX42w)7-)N$$BNdYeqvZgqQi2{-%4Wb{bcKF{q4)QH`kviPH8e?rVg7m znEBc?Xl-Y@hpRVC+Y_}7;f8;FDHZ5oW*&dWtEd<_OJpOmhDnzIvGF4{Fk~)z z3w*M>Bv|l;W{gVAu+UIQN>w`9eCkjtC~Z6Q)0psk5s>gJ)D!UVuc9Zk55xilQ8=@y zY(J6r;vwpl(g6!Y8WCAc2?yVPHzwqiS1iLhBC=F-SRh_s(7r)d+AM1aC*6O6-D@($ zaUwiCZF^b1XU!>FcM}@bIQZb1K6@7JI8wo>E!Co+I~8f5JWa_O10`x-(}}}BBv`ex zEl|6wp|lBY;3$|HmLe_gh}yn(NnCI8r~g|%i&QCQFERObCmf2MOoVk(jc=ek4Q}u4 z1(ZoBML#EDryfHZF%tPb=9;#_M)U2Q9$emRw7C&(gYT9?%If=^ey@bPIRIrs8F=0G z!hsEF`~(Yqo-5Ps7d$}zu@wp##8Km1$tFcI?;`&poo++(Bc38dM5@9Hvo zh$LDIIs}U7fTJV=9QE-DYL{_GkI1J2Nb#31ecChk2uXcg{GrS;ky`e~Nm@C={|)0k zn0mw>#mkU+Ty>0%XP}NO|I_R;k%ZQk$T-mJ+XcnB{*hI|QsH#F!OMGo7aHTx|`^EXjS(|-)QeA 
ztr>>~RZWEvshI`ZJRvcb;` zh|pwjd(z$O!IOKxbyHg?&aask@0@AkuQOC8$PGTMQC;>R-7W@qAW(gg)uAadwWb;W zSK`H=-564==MR*r^^oCk^7cmU(8&cD`0LM1kxwUv!^w%LZ`mvC4li1GkIjqsV$AO= zS1sRW4wVNPi!`|)qmzzP8|}8Nh@+**>x+5G*M?eeQoH?URx!DvKdY>LARotkbrF#p z$qljh75X?)fF{0weJ_jGeZ2To@n^|vLSanFu>iBm^!esxG+m?8w1IVprE>3{hjWBZ znY|>N#F2YVnsFQ7^1IltfAp=PVL)h`n-g*(j)&9)vq)eH!bhd$pTFgGMU`i{#PN~I zr=-TnmfRc0y0Gr$U**f)oGbVcD4Lu?Ezp;wz01T5Xkebc<4-NE7km31ERIq?)JMTm z^=4i^@r=5e8{#zs?nh9rUS%&B0 zRNu313%L+@mBWund4`CV<&Q)0(~b=N5>S23i(Q6hJ?9ioMM*^#+xF6J;^cRyw>mIJ zQIfMMQQ$V4oowef3;F-NgdxivtAbe#7fz@`qO=&9$>?si5MLXkT#xtIxUbodCgdWu zkVKfByA8D#>zIdmC0qa2JW7h$O99P71^KW6y|c%bcntwhN3yl%M}UHeTu@=gytoDn zB8D4<0*IDmUC$z=HP->AWSRDANfXkT9M<2CjduJ+;SYy}0Q&c$6C)OR#wN)SWl>UH z@GBRGi3b+haHSB}8|l8c-Kq;#wYX1qKa`qw!DI}W0x$Fgo$`Vdi9OmiE8A&5-ta4v zu6p}iFz9qR|4tV%>9bq9A8H=i))pqw3ZRo;L-by0uGM2^G-AC$Z+IVnbq$=(g!AYA zx*z`hxk6@cyt_)%pKTn~XXLBnL+iM9zm@)ZJzB7e^L1n9Z`t9>luZ#%$!p_e`Z(J0 z!;OXQxY@Pkp^PHT?)yU*<@AC5tiafn>w{PPnvFNH_n)8RqEq4A)_8{-j_1T%ZX4Q* zGUyw!sb0@he^Y#4Q(o75PGH8`E*cg~ZZt&t(IT|A^0_pN+{bIR(W#@}>6iEV++Iw= z8IgC6ejzx#*rs96;t{%n-L`pN%x9WQG^u&Ja3Vu8lkN1k?>c$PTfm<^ZpDwlOr6_7 z+Q>naYr_iab~A0p7SbE5tXmE^K$GChGd!d+Sgx0pTPH<10xsx z0-TNuudb7Dn$QnJ%JV}cZj$4vj{JX8Y?GCFk000fSf@4%1NJyQ3l+&}y|*w)_JsLz z@N1isyiYT)*Af!8b9mygTpzoHo#q2Jh_M5yST1{uiUu>?B=1Ld3GB!pO+)=%osC! zsuQ2ff-p;b02wGRjDvrQd^Es=w<`nd1K}f%;}O-Uoslu_WFw(b)u6KWkbizyf(D$I z=eaZDxUf=LS@qWJweqP`*Jc%%jopsQCdOT(B7;+syr7)v#gztE3b>n)K@XrYC_XHp zyE!pA6z&%@e;U5ptKF61Y?j99M4JkAmXTP`ToIeu^1li~KmBbMG8|3)g#V#QT$j*N z!&#uvm%sTgA7#|lKgUOR&(;n_MnV{^zF6_{7gwJWUtew-!o|o#6r|Y#fh($dhl^a( zr&1sc?RJ5Nd-HR>UzgsR*|-us_p@`6p0=RRG3EpzbWuU79sXrw1oIN&^iFVh@cCqt z`mhNJ36%QQYi-T#X*N41QGH&aW5I;bQ46-g^y0het?#q70VOH_JH`Z+=sS zSLh@GedbVl7MOt>lJk%m3XjJ?0oh9UVkR0xqaeLhf2#PtCP@^EE}<1EQ+3Q7HImE%8GYiwyG&?kLioQI4QgN&9Z2e|

Z=!(L+Oulb&fL>a^D|FK`p2j}8(mU%Gax?}g7cA>k$u8Gzh1cm8Ks(kzI zyxtjb)NR3%XIy&{d$-An@Ii$)uLBaAg&hG)^@mf!u>iHray~A;P6|8wF5TiZnhq%-*3Nn6k0f9}Lc*qzlPi22s{)!KwDwk(wGJ zMpMPA^xa?{OVmf12kvd^M&OrflodvK*H~9YCeHpj&5LzodUNrq5-;{`3;E=XP9+2q zK60Blzz~(^w_h+OMSeo7`JT|Wql)Pttc3wo*Es!fcaeo~2khVk91xjxUXzWh@-s3( zHn%#r*5PJANqBa@I|;dBWF2SpbKwV4UhGiTrhWBELQ%UDa?mN{l7ayp{gm&(>68;7 zbkm1Aluzyl+IAzn7dw;Z-CR|~Tc$t0UK#~sDl(*vJaZR~BZugfd&wq27Ac@NHVgd_ z*0p^~K9RjNvz=6(l4uGPO5E-K#nQ21l=%((hD`U9r>4{O=(zx4yX%o^&J(17gOc>oI$U+B;6XCJdW``P(z^J*oc z0@~>g&*)9km3Fh#rbZ`JG3y2bS}63H?55sDI92U0 zEdb80ClEqh5P@yp^4kplD}F%r0yUdtj%+NM<72{yqwM#)R<5hONlTnKMMAcHBb8wQ z`NX}Fj{l5rX8NJ2)n?>7zS#*mfq{A+0ocLVsrk`*2{@sa1&O=oKh8;+mrl-B_y1H9QI;wd{I^s*INa`kS5MF z;;^=1HL4)ntsIh=@?EsUG`a85o&fo+z#oIBM#&!br6oxImY~ z{;2Nx{%2Z%wQo52C-k(;UnDqXj_VU)8e#O53L}TPI_7KxMR>q#JXmgKdlwQWc@r)n zmU|#_DJlr|v|hy^SneUA!H^iR)ol)Zd(`^lcUfVt-2%U8Y3t>sT(aEV!e;|=UqA|0 znK)YW*23VWyCf(6fC8&ofqMlaF^6Cup}jZZy7dHA$u~XaPn)tJKLuL@ zTpurKkVqTn6{R~s%a@}6c|t44iKhp4oAm2D70mBL56vKfy*Ux^IZqEpf&MIa>E~7W z+nos`1dG`#jjLONypC7rfSn}OuWG)^QS=tN3=yK0x^;%v`PQ$S=iXL1j1A)=^0C%b ztNhmI-{rL`|Df(y1~ke=6~?f5A9ejI%1I?K%Cvx_?%YR`rk)F23(Sqhl6A}afKkh;@ zrKX4OEi+TQ)iyTrXe=&kdc{=^2x+(}ko(<;qLlCaOLEjFGbikNMBU)+`?vc&p~ zAUg;{nvx~(z+wx*wexjdUWY-P^}WgbB@T<>)y85)9$l_b?vLga>tOJ0#+52p_VUHrGYd5{FsoHcFHo@66mGHbHSOI0^m(dcNhmF0XtVVIA$$nwkJtOW zm?2Y0PNpSp;{NkJ4&uWMtT>5wF_LLZ%dB#0VEuXjoPXmYYO;NjxV0rB&TQ!Uq+NU4Di>_t)<=Cl3OGd*Up(@^K%}q+j+Syt2tiEGCUgV7j(Xxy*M zugP=4`I`8|SZQ{yJr$5Y8g#qlJv&T0R_5RD!bOxTTJ-C`PULy3T@7aPsy~K=zH!y^ zwHwB;`wV<~nA=cYYDlXp=u)vt9*$#^Y?x``n*7uvfAd6NB!4PAX}`{6T&8 z5A!~(|758TAIE{&yBG%V7#3WNVXBX4n+=;NmQU zL;2TVw=Wy8mt_VoMc=R0s(MVzn66aEU2gcr5bwEQrP{#7%NJem?RYMVMGH= zLu7$r-SM%@ieJ>slom+#{a$&yj$|tU@lriO97XML+Q8!J#=t{;gJ~UVx*ysPv>z6fB z&T`r~W?X5B*+4A53kb_qn>EDWFF+Evu)SwXqy!RCewU9Ghh>SCzYY`~pIp~6^4EAb zi3N9?`x&-26F^Qela1!9IfFpWT(QeG;e1knLl25^{k>q4WcNBPu!9Sf@V*=Q7J?bE zzf}U`hB2-)yUctP1jG|7tfmKqT*i_)xiaJSJ`;dmC!b>X1%chZq+glBf(Ki~wl;sa 
z7i}5f=9kDZrl??=Y`^h9WBiW=aCxu66Dza&unb47v5lbPnHQ)O8f;BNG!9=-b#V~a zkyAkX;{PE<<|zGVE2;apQX&jFEIEb_7w~d#(wyIZE1PbD*ZY?vG??4aM+1~` zV;?=0pnQM0E??<{bET4-g4zPPcpF@dJ#)QuJ?t5$l9T;5qu&>7^1Wv}oP5-bicq&G zITPbL5?2K3>W$W(EQ2w0m6#C1kO_X$*E`#$9V3lmm&kA!6apX5 zV|)$TpZ+qv)o3Aa8a=x-9tKN43zEJ+J{x*ppB0T@_9|IKx2YXxF)G40K*}I1oB=}` zy576nLqxQbkO1*s+CBqr^bdyePSCp7@V2hD2@R}^HZ<+_iM-u#jHEA0Z|LMyww7nA zf5&?sK}n#$nT?yQb&%MSM}IRgG?=0F;MQyBcEGelKDXl{Nqd`m-zt8x(*M{iZ(}`> zB7v@9i$G>+AkIvKA!2*o>y6E84e%FLJi1M;#4+T_b)d)V^BYe7cxpO9ZI|K%>8;5T%90pyGwxNHpe(RmA(slX0{Bt(^07#{{MU$dX~*&^YvGY*`6>GTujv7NP0oIlN!P27&mPJ3AsK#hb2Ot zSiK00jTjivk1Txk>InECEQ$L*?7J>zvLnuR<3Ich)h3cbP-uq0se}^=aUg7Mcv931 z1~9tI^fy+m25WejL@2Ymq*}@T>WQQ~JP8qsIwb6f-w|~?6iz)f{HtUo(QZwj1XQ>D zlC+@=Q3Pep0g7@^0zLx=AMmRxc$aXVL@#u`?_u^a7_;qyE+VPiGx{#;4@Ay}xVhM1 zr`09VDk7fyf$ie2sj_EYF4o?(;aC7FJJm%C)aLIan;`}S7lrk9e~+3_wTD5-&tAqr zu%T5I0}%UZ2T9pT2?-_s0s!w6eFw`fqV19ORM)di%IMRz+ z(%F$fb~F9tW`yS{?~WTXLgkcUVo_0zumtl7zt_q zM)BC2sh&p(qPV~&VHQ`13rd`BC5Dx`;ECHehC3a`__#s?^wLmk8~>~iBc8n(G|9N* z#C49*5O?Pf^w)Y1(CG*O_i?xA8sZ_#JelACA|%xbK37LOR|>?9OV0{3+DDK3nmLx_ z&_b5#j%RUM0Fs%SZ8ErVrRO;K38$Be*pAm(j^RUWWEo$>y!XM{aFQTWAmKQa-yS7y zJpuMw^F_gjHf|xXa(4>Qsw+-}#vkH{!>m%lbd$c-PmAy}=l5g8b(b6#v%s43yJ~_I z%68jxJ6n^=CtJf6rrw$fxB1*>MfT8(?z_&2=b2LdQ>Vojumb6GXu-S~V!dudtEO(F z1EY6?v$E1*&pke}#0Qekw+s(G_jCAbL$$e=N&JsqO$^}C*#oU{+Xxh#9lQ$P%!Xhp! 
z5H-QQH5+Z2jN#3j&!*602NrwAyv!9tF~P*!o#FK!w$AMmLT%u^=rys~el#w zB9@=gbt@VcT$yl4&{?-})l4y1?!vC!eM8)kT&}LkX214{xZbJo^)zkxujk6vbB$<& zUgo|o+7Vr9eajRe)@pwmPTpb(ygibyL=G2|xZLw$e6!9ym6$H4U^Mt6U6MZKtJ)L2l{kv-@YuI~jbeBilev zQJ;NDBF_OyOI7-~;Qm31#s&n+s?^aBLu2j!&%3i$ zMZ?i0?EjCqw~VT)YukoNNdZaeE=fg@Zloln5h*1krMo+&OGzc9ySux)8w8XNNx!-9 zy6*dVzVZHj-!UAr_hxO@TyvgzoX0#TQ*Fi~-85Gm*W#SXnIBq&Xja=r0~N0LFjk*q zTdWEGN2iw&lvoA1&V)bb%R^}=imaEKNWb4&lp1u>yPO+-CTNk|Z*JG2DOv{&MtV<* z2P&NCRoM9JUvln@HL0qsuDPNm^fb1={F0DMhS)qUx>L13HqW2X^GTZ_h!>>wXm@Gn zzD1)ELhB562*Y1^t7E37-)8YN_24*`?_i;N@LE>jvOK<9n9p=4aPJfd{xcXNGUxBp zBgU{y&&l@j@N_Ho&2%wSW5tJWcyT42@|%&4inhXBn~R9gB+ydw10HboiB%vyQtR#~ z)@bV{+AdA0KgMA}Fz-3Zh>;Zec7y-x+FQT4QU3_}L$a7wB`2HjMTLnWO09Xadi9|w zx9!d|XR9tIlnbrG;Td&*T_|OMPes(#JuW4?nh7?>8;p|8cd^gXT7QZE`paljo&TYg zUzcgsmP$KgEA?ul#T&j5kw2~B#?7P#J5+lj~x^^ z+`;1VE49Vw8HVQz(8hQ-TCQuPuoe0Hyrj)t5bf}l5M#!DXN{cU`zv+)U1daq8uO4y zO~92r?+%T-H{BTAn~JLyxd(83H|?Tme%^;u*2xYh*DU%XSnn``C@5E2sI1G{*S%2i zdrC@B*i^pj;;GIgwF+Y4wWw*l`mRy>m8`6%*EjC=&HCfeRgG7HFT{c*D3K+;3$(At zz6KsehecnP89i=-Oo_Gvlj{9zxkkF2U*GL>uOYvf=M20coB|&*LGU$JaR0BlJIYa0UY~`DV-PmW8bOBPWjY?V)PwP;m-qd@R>*SU{SxmED5Q2oOMom z0gn)vNN-r7i~Ai-#*}A2MmQUAz)&D{%b%sboRM7!{mwR5 z4sXBCfqy}Hx9ZVli!prg0j}QZdZUU&5bf|1`4Nnhuvg5d?V@=Py7HTY=08P&#eUT+ z$ocx!;y=>{1as}P*2xs-Ln|XLh=V*RL|cp>E+MJD)+*Q=wLYLyz#1 zDfuOzFteXp4QA_$NW&kTOa;zo-v5$NvI*NpLvZ^*KKvN@YRy?eR@%5i=AkRi`d!e3 zq+gK7W}N!zIMxf1$7$n!&EZFy!NZIV#_I>916fYnit_W-x@iT*@Js7YS^3qElH2&a zzC0ydLV54+Ty-Io)2&l3Dc>Cvvq$;3<;=Rq)U=0Wh^oXS{D%ofduaywRlmcHH4)_=2l?+bur9AF4l4**#w>d- zXL(*Y+mwE-<39AmN%s8k^}MyI*>RYB?!@EEP3?g~E1z|)5^a`U(%2h@<SR$rh&IN^;Ds~Ja1_ZIpbdM?zqqAA`oo`IsVa_|AgFna zH*iq&ujf#q=Mc(_kmZ;*3-Bj~j9k*BHUNa<<<^)r&lL9sR2*#0u9(A^Wyp9_WlWOtG> zDh#v|vUmzRd>;HIizO-G&eQx-4$-yKhE1L zI3QJ?wM5U=NnEaVGN*2?{UQ>a_n2%_xRJk(!sDmhn!bCPr%~pIJN;ZFSIX05819tR zLiE%zWD37+ukLmTW!iFliu}dLYl-Ff(WZPM)$1>Leqt(KPF)$O&DdZGExfkb=4P6v zMKQ=n?uMA0ZmgB$uJ>ojs%9C;EzpiuBj)NR0By~=!nHPXYb8CpD?c5xQgyV8@E6Gz 
zZ`%v8l0@0-r!P07vz}9$TlXeRUk0X(?#gWO*f%*Tfl@|m87uZ7ytkC7h|npiyW91z zYL?XQl0?#Csd2XV213#lI0y}N(R~Q;RY{8oPM8MOw>&P`hfJ`OIQ$4zhp#p_Hs^}h zPn4QhogaIKQh=il0yv_C2S0NTA|nN1STEX7XW^py`!II)_eoQP30I*Yw!;AWCPJAU z!w$-!Fdp%0`N-}>j4@{XojWeEaGAn2|3D!AB=<&Pe!+y zudvsJFjhg&(@0?l><^<~{Tt4vJoO14Ej59LowF!Z{@=J7P`(C(hJ9~`xYxsQtGe=T zh4p^TRoW~$Z|`XPBPly^$em%%FyNlH59pEmihGH+QfQ7Zzt2z{Sw6QNcSS{9965H> zL`A%{XtujZ6>YV{xo~vFMZKE$0DE^qp`UL5;9O;uz~ASt%8=Kkc!&TMaa<(XB2pL~ z&I1>fiaBRLK+QG@^?)>At#C{-_)s3XzegO7CejuTS9&v<((LdB>=OK46}hVd-0Um8 zv)xIhd^HwQSOi&OF7rdT@3=y<_G$_CPL_+w|swa~2 znca_(5;vnr7?c`%;iy6wRND(kRBP@^4UjSG&*upVG;_pM~chH{AhE6VV^mBR=K8vlC{2-tu(H$S z@N2Fv;Tp7t*rg0Ssy5-p{yXbZ6p-{_q3s*SgLO#SOdFa4G+z2&162|kdPbMmh4w&- zj7kU26GU4FjxV(KfD{eBOZruOKl|lXhhlrXZb0rZ@!ylfE3BkAE=BR!Zv9wiRxe6k zu%FgWCAZz1Dl{DZDsc>E8l^3FGFMp=Fu&=tWE0NamS;t<57A&lGH>27o^z;zb;%52 z-lGMNN#JeRA1}RFXMeVp>a>PZd%5*xeDe7`rFQ`(yXT!-?QUNF>5(AVC^c64zPlnk zMYOdd!$^!K9VcBe5~r030q8~ut!-`~i==-cIWc4+;5bV~h% zu0%4@g??_t>EyMsp&~ZE!#g+^gJj8NTK*>?%5NW-h8LOCnK5j7B{4LS20->{2^sQF zNP+A(aS-yof}>dxUg?RVcCp-FsJ&eJ*%GKp6R8aM`^zb4R5^OUwqU&T3dlzc*E|*M zBUD5lv<6WxQITQ|s#xc16-#C>1_b{`dNgiQRJF*m-xD?Gucl_0n8U=#_ zWZ8fROj?;6oYn6(SFac^1gXFOTKb^}FSDPiroaZb2vR%SP6Qw1EMo6)g6&m`C)=E0 z2F5LucEQ^us)Jz%AeT9d&nC@)TuLmZ{korsTa$LX^Ec2$WXJG>v10?dqkttqV2HMX z4V8eG2G)yWuutUgNFs&xfQCMPtZYLe2sAid0X+_J5qMFwFgH7}sm>^(kP&x)OpE?~ zCN3(S@4NE}kqo`_8D2u_EJb!g-0~a@)!#tU&SwNhqw-E@UomJD+0BaYZQ9ca!~LKC zMdBfxgR_CltOc7ajrw+tI%!ta;Tnf)1r4>+D)z%;CkI1(C09nG+FOZnC4tfU&)lq!-9)O<@u^0YN*Zj}F8+iu)?7$VH zFpLQuuz{bw%WH$cCt*PW^#82u{p#8X15G7q>2H$-Gb;Lx-lRugw6%_4N?K{MAmCpK zprA+a_`QmBatvBNez{(t^frg=gckFlb@aX9M0yFKxSdU^8+8NAX0}iNe!Iwdeb5vY zm&st?HV~mKNjT?O!k2c1a{b%yUt4A zHjR&FMSNvjyMEv&FZ=^dF-&EK4`r(ukW@p09SiImVdrp{0C19CNMS9ke?JHRi_?I~ z5r57v0>=os3Yn6!F$5rbi7?O8bVRKK6&@xCSDWl1q#GX5CbMap-yj4GT}H!HFgN*$ zwlZ^hz=`E3vcrDRbVKp?(KL6%NV%V~79k+k#}S1y;}cjb0+B>T1#$nGN*=HXuP`L1 zMB$DgZPHOJkvO^y**jONbG`@VA!d>Pm`dr0cS5 zH!b5j));S1Gkq{?e#6RfyXDr4?$}(_ce$qCZ0 
zf26+%S&?QFl>iMX66U}ghA2+#ALHQ-^9W3!aMy;Sn+Pz0x`~WTywVwZYKfZUG@Yas z_UBkDQj1tC<*g4{;y2w^Nvy=cRWB+u;zdP&+|}-ffeh+a^l(}glj^12Y;qLG}|VFTU;wd zuai!^q5`&88G2I+u<9_-rQny!42fsTUNC=k??%{*icv@-*!>kPu=}~0{dNCYFB=)K zOv%_AXEZCHz>W8vO6_(bf#j{y$xm9tDAuI0zs)FA?&b>Ov~5Bd&C4am*_Pd%c~EME zis<`caw0rfv~{BN;k!Xx)CZ$^BsljfAiM(R5RW^?Mwk`lkjx4ttn)wj8-wmwv1at1 zh5{GW2HVLW+*GGjeD_+!T5^}rH4AZWjw$|7QixmkMxVa;H2S3T{w;6-fhi4RvXF;E z@jUev74e)2Zlt+4>peC=8VxTIRu26yVCZ50QNl>C1Qg&g4fjeuia-;IRk%C+^fki| zg?idCvk*S)jICj_4`!ZoEXs{QI&hsmtNJh36zUE0rh#;XGziTD=}2?vS1AV4f$LoL zuRUdjqyr8=^g%R`4&l#nzW+)`3l@+L_UsK|H^@PzJ$`~#2c!f4@#POJ#{Kld@<15L z-2j-hhn62onYYoly@-Jp)@5{-fmy()ft(KU8|Hs+LP#YXr4rCmt~m_!d4*89g!(@o z00WW*uQq0EDN4wCBPXcVQ-Loty=`KBWgS>NxSe-WQOWdHQ#bL1b}z?^2_~7X$jZ=N z9~H6O1GNV3{!;KupqgoL2!9Ru3KE#V%T}46@1KCxlA_g~lKa;YfJgurbt{&lm>$*< zx^Z`Cma-&Rz=_hcH-os2PJ>jrdzcm}yj3CvNhh>HYI2K-+`3ATuqZP;7SGSNgkpzJ z7FcE=k_=MYO#Ca3aFSkBxh+Yt@HW5=d&%lC%m3$oYLIQhdqGSMo1qvP)AHXKD1=W5 zih)L^?qiWFDw-$xnSve`Y?<=R68_re06M0w+OmKEe3 z6}Ye!wYOH5uj)UB^u8o5f+xO0_f8a6q%(p^#@FnpUIT!8NDl#MTQ)8igg#EO&?><<3Q>8yW5N9GVAr1 zm*8g>6h;#RcBK59+kzm-U|>T|glfX+AqtH=SeB02qrMHu%-8Zaz9sG?)v1!Vxqhid zpI|&f=O4o-IFzt)jP7Cq)OE3~my{gT!#4>}=NNF&>->2H?;pck7|V(&8nq*x z2N_!txuy|G@b?OQ?OF&^ERi^`<397Sz(q1GTO3j#25EFRx#wFDv=5XtWtM4ArSQV( zB&u7!bevRG@4dM^8I&Co`r~#9c4ZE%8|?oCTR;eY{aUP_E;ui$-;Z8x5B$>2v#42R zo`n9GU0AI^Lq9i9uhqw9Co7>ZQ#vNo?eaL@7D^Z>yzL;{tDQNEfpP=nPh<$87^vF_ zQ@oVB5W~Kd!299~VJ+8A?7JUG+oAW-uuYTG{adh1B{AwQ(i!RM%&QR|z#?Gsl_vaE zCr$a5LLeH}*1utCR?f*ZO7y&+TQ1CqQl*HB1eIT%?a?p0orE{s9QThEY4geII;|+c zC8erce&;#kj1z4ITVLp3;U>PySlQ@}$#Ohcdi_Y4o!fYbMEm*UN(u!iRuM*pY$@FL zf@p-VkbOWUNtIy&;x-<__bvSOLe%RU6N}_3c_z!{7hRWYt~lBDKwnKde_eM0gV%^M z&I>B>OZfP2(o= zRGG$1wql^6%RocNE+~japn^n-iYQfz0~$IyzwaOzy2x)6%gdEd@ypa86Lw>+aUiPcPl-#VaSb&M8E2h zl`H1W&B<_?)6qM$>~RGl?IFQyb6K4-Ob}^tz5Y14ZJ4o=D&UIH#_#tdJT{sU*Y9G0 zOu(h5%^!`LGv~ti_u=ZJ;cnZtqi*s9t&fe9N);g6eFhp7^K2*Eo+`Fuy__2dk?&?3 z8e4_&Ng|}@N-x0^#AXa6ivk`jjuhW6lsPyRB2uOfAU>3XOfPs9!Ub^?sT8Kew;o4)BIV(yLa{&Xk>|Y-= 
zN7E~$Cf3AI;)-iQ3J!Fz)0|DsyFC!Id-6czY-ae+*|e_B%~AJ!IcdZwQl`^y9yP1y zI1b!Shv^%`X+m#GYldj&=s_;}G@eo{pr<1k=e^6KY=9yb)dB=+=(rtqCKkl3zAgck z(lWapC0Q*8wiHmE!N8Gf>t6+_D9iNGeKe^g4IWx$%y5wg3o>q2bOXKU&_lNi;1q&~ zM%hg=iK!a;pK)C>Thi+Ov>JHm{LN?MAE9d(iwnU+tM`{K|9xmp35po1+yc14R+o6M zdDoTlVZoT!-(G5VDOX!+@649xBvpT07OdTj*BW^L({6V{nzi}H#=E)#w0zNyXV#>G z&fx49Hc+j1u-KW*Pf#h=S+pT-NG*I@tMun$@WrymP{yLNJdeb1{oU2x=4Z2_(&X+| zz+TQpD^@GkVL?u+b3D-D`BgE#;e*CH(wJ@kYfcrFfR)4{MF3uu*C|%4nXn@klX3048=-hIze1R?ZiR=tR5_|MBSfyQhJIutdYziIpwNaiACH29hJF?C<{ zhf&e}nstoa*WUuNZoqs~e{8<38hSsOr+US6n6Fu$J#^Sb{4w_ZCvX&8`0Wa44VM6* zpx&<%p#~tCItI=5lO$GN7dfA7ja6A_)ow(~(tKQM*s7J{tGUa~NyT>h3{Rd3%t@bN zf;Jt1^J2iRFPT4gw}=^*;eLXkHEK6^Y%(e$C7eGCrHjgVx`^+_0LQROIrBAE`u0&PxtJoEV}rtK5lg=Wv@WUDq5BloVMBJKK2#SGD|jQvS0 z2bFP6;}qJx>0gN7JuE|Pb#3}aoTp=bLJGl z9av#=*zYfBrn>I);1KwI04KC@MwHruqiuKEIFQ#Vrb1)0maa*PHA9diEMwQ&kvNfvcxa`m} zDrdi%;W1cosSg@2O^-+6B6mNH0>K53(j$m$W?3x7<$s8kk;m5b``sn5uLCY`DoqVb z&G=9H8o1l!0hq-iDu=r_UtJtVF0^!n8l;mVY9vO1{3s_`0pb*FI}kV!lmKcpY$jlvcP*_MWpwGJ%Qe;G*6Qn zi?N;(>a1Ynp^BPwTg*LooqQCL=DbC?GgYWHb1CGd+c~d0o zuZZO?@@L6uJ)B)3mIn{DEuY>m&a~|}`^O1{Rttc;O)|TaU-+*Ma{Q#>fC5yCUf{W2 zJyTSZgoB{t$M9GRIpJVJO~g`;0vPstYEZ_eVCQ%(zQd1VZOc*&Bvy%7HDx}uryv4o zGM3S+c?$4#;$7kA;u(4r13e;t;Rl2iFRdt*5oas6SxLzEepeiMs5vR75BRL~t%;)^ zG_6#}AXL3BVec57i%>}z#jgrRMI?Uyfr8xa&|7CRl2UA=6^UFWpYwwHtX29H(fSLK z(c`!lGl~XAs@P-T-bg!><@GHoo5b9YLWBk}$QPyk6%PT981+Oo&oA8i{O7E`e(vfc zOdYmgw}I0=w!;GI2U(Q3egp%s@DGqhwW+=J0YMXa933`$^g&1PdE_LRE%~Hpo!G1- z>!Lp_OUiPvX=VIS1cYu+$Jr_KZh6V>LzOZpRAG)T2t-(e%z<07#uEsRdUYQcS_i5# zM_qZ0OywGrWa4w{`WOsOMEIZ3o`v~N0PIqv%xJe;G(&Hf5GfZCBmxFH*l5*-oUD@(mteO@LOLj(f57pak;5aMZ<}(TV~k5k(1^zCPoar-}1X0 z(1y4VfW|r7AoN$ba6y8d0e&54%N>)7A?Tap1t1Jly;!u18A#!qRw74m77N7SJQ=B? z#NgnlB=z(0M-ufjxLuDB+VDmqiv+lEp2WbdlfTaof`;2xGRT+<>LAmlh?{&TvA`hX z*Z9P@&n-Y>Q}6I=!Ae^AC}u zVNL!cm`k{m2wk8OrCi>zkB7y-GFb}h3-LDcT&22ZwG>0Xi}j9Z{J-&7-_F=bJn{2+ z-xW668DT0`lV&uKm;o5;z|M{He5VJE*fIlS)K2~y^IVX?>gDy6>0nzycn$}_R(2W? 
z_8sHYTzp{6aW7A+WwmnBYSXj;R#bjAw(K~RaLRFE8w!m1tjo>;A9U^hHa;-sFRS?=neD$b4b}m=c43$W1X#v?ucL`odnQ0V=RUfzMY_amTPZYBHqMDQO=B}z7+;L!abI~?fyv>*2PsVG*PsvMJA zK_qBbv!|)9~C?oP@M&)3@KB)c8=#-DJ{lp9Cv z3q1Yl$3p#GwAFI4ZYTAbMLWQYF#Ni~B$&_xd7g_ZZ)JvF#4EsHK1>r8!6c;a)Xu1( z?(`B+5wQ40`Tlo0Q!<+%dvstOnBDugK%QHq$abmxeaM~#VAg?R31(Oa>%W+{Rupb< zl<)|YklO$Tl!o3UM@p;G8Oeu@U-Q+&R1{(A49KP96S>Oarl%I;*>a>}$kc9m+Q>mz zmB^~08@M);tT*FmWVgD(*tBhJ4Dd#Iq4?W90fMxZA5yx11`S14AqX^1344FGXOe(Sr2Y*>uyu+AL;9X`WqV<>m{ z^~PIoNY1BQ!mpM^G2P3V&@U0oAg`D8MtPX0(u|FKxIt+ztSX{pQZ-Kn1xJpEjOCH3 zGlB=91}2hg7VOwmX}0@M@qw=y3_pU|2xlz*iHkyYD( zG%=08kF;149^Pw)L_DJ{ioZDR4Frtz9jqJvk#3RyZ=hA|aA>(5o7}KP`vjLCP-oi_ z-pO0M?DJ*7(6^xuKJ~c%^@35YAaQ+D92n#_Pgq34<_Kps{T!% z0VLZRt7n)+gB1nErK5-9pFuR5XtY9T{%P$KdYgjnc!m9E@9PGhA%Zkw_QAz%H)#mj zkjzLQewetm572a@6~T|60I*9Oxk&#PcE*6Slu()#@&7hJ2gL{C{ysU$HX-J76}^uv zp85NP6LCfZn>0Ko56%fVV{eNeKt@W?amhXXF)AjsH&JZAH?>3Qmcr_I+<+eejJebn z$gyRY8l2ld`^>Nah(rHUp0Ls74BLrU1!q^g!KoCI0+;Wa^)DnV3rZubn)H1AA_u#g zQZsIcr47I_@BsSB0^4f3GMePFBt@Q=xL-SPY?O^2n&hNpqmfWPP{k2_v}`vnXR0=F z3xo?CgvUsr*VymFaT85wpER9rc-ewRA|3e$f} zz5rUh$LzIoQmrH3thk{>b1Dnr4<$@+U?`yhXtDQmE;*o*+(%phR*B~1!T;-)Td*N$ zkr^3-*$f33vIZJl00$6k1`u18%wVjlJW^AYV{1H(27CvR!4R`$#@NH4N7Nfbd<3YjWThK9VB28V3eO{38nwAb*Xm@u zG3nBfq-|dc+=YKk{#IfbX+8b$KjaDX)0roenTlb{~8mQofkbZ=X-2)3W&lV;k zfi4bAm~`jPP9MYmVTE(_9M2qYE;qa`;kN9GT=E30O%n~C@e0#7MfO8F`CS;mRd_Pg zqW&^NZ$)kZ_OH1|@P=Y_tgeBlkzO*88)eK`S!wm%EbS&!u4jc1Sh8caK@tI%3Um@~ z#ldKBc13?Ok1ew^w^tjTsxaVlJ{R-01F65bW&Dr{(jgg-VIQum!^Aoe%cm`1*ZQulCMmNh zHb|_y27DMCHhQ8!=LaZ2f{?Iy;{l-Jdgmk9uy%9op8oN`4`3ct`wm@7O|B146LMQh* zGxVtG_FPM3nWD*71|&93M_t7F?*uDOdg;@8F1OP@a?MtM)J@{H{^}e?8bk!OT6CZP zDuLz<@=E6Irx-y?^4zNkvK}CPw;xUWoa=%sF4$p!MU;nfHu&Ym@J|AM^sn0Vu!g)K z9Q}-3O5>LjH3mFZyXfJ|CuSS;RJe9){)!98pbdU26P0+OsZT-|fZ+2rw*)&~TFfV2gSyVE04OdBq!P;MojtZGK4s^s-Kg<|FCeoBS9QEfs;~7)%}LkUC32m6I6c z7}Y3Y5(sLA#}3Ve>Uk!uKZ9Qa(f1=98$+4nWl4k|N8XmzdzBRL4boIZTRkFBRA$e9 z)h`TkG_9 zn|&me=!GS-{q97Rg3zrBW~rGF=IaS0ayPwRhvOf{j9BXwIUq(n$On1Qx0yoQ?I9QB 
zHOFOlbx>${G!+AU#tzlXqt@Ck!kNRTSwGn?+hMRs!%*CA&@;M<$@$hRXD{A>8@oeCH zKF#6F*MVfe-4JXIVLodcYQDS5ryVvSc?lcz>SBMnA~ z2I&Q#5Z*!6f(I0-^2AU;9>BZ)5J2wMIGKg;74~zANM9+35`+_*b=@4u z$cverW>Y^hTqez0a=`3-5*4U?n|TY;-q!~ZkSO>0dGLha`#Z>+xE^z~F?_>n7!$GE z;u3K(!E6oVwQRmU_Y)h*mSqTv96;{=8Ou%=HfUjkJ=0oG-N64+Mh6%5YcZzKP00dS zgs3{w_+|j0=jt|lXbAoVJ>sC0{l5CR3SOZbw$mVJZQf^9 znoajjs_PcIDuk)xQ;GP6OE4I5dh3J5-vs)uUjA)g*YozQ4u%6jtooY(9{CoE`HM3A z7vfVP&@sZOS3Ut`A`Uxav<2=P?^E$v-#)G{QUY6SAOHS+w0>4gj0V?cuj{yIUm9|B z50TNNK#oVU4^^7t36Ps_YpcD_q1sJ&3C99%Cn+J9FwvKs!u#rmbrV8MkO>Lgwk(l8 zQaU3lBD`M5a|~~&PJ$t<3|x3i5DDDh0_q_OfZY(&kR=l>?x#?+6*NbcAoIMj#?gM} z=YyOUa#(39`duyPPcnjQp;mP~w3C;mC~!>Tx3=GyM2$)gg7vaR^&%Y}lbRi`#TF&{ z9w-H#VKpHupqFZZOl5OSmMt!xMQeAO&p8}GieDhVRM5=|I<#W>)H`7#gn|~G_)hj> z*XMqRWyr1QPt$j$M|3c#+Md4e;ttT|>JxS1arq{iv^%ZWrzjx>g?jWaDP@Lh-&~bCKOS0UvGf&Ol+=lP9LCL?$3~MK#?=W zBqYL|`cn#UP$Bz`^C5#R_+)_O+OGPQwPaneago3rukFvhBS^G7{h7#74vKCJuYp^qkI0Bw>MGCr~3XeukY0>37= z%gu!NcUZ8Z=!rl5T$+Ecp?Tk~M3sVC&y^7UuxHd4HyBGnk>ag+kZ($Zo(}esTj#A3<=y{0wSM*Z#XHR8n8bOjW8V^AdZQV ziF?xPiz&#Uny*&*7Wu#mYc>~!?2h93z&SuftA=4d-QyoNP*pAYXnm4q1tN}G2A8_! 
zK}h10vZeflHH?E<8dZU>g~}3KiDx!V2;)V$M0ne>c|8xxbpTFiGOZFG0>LK_jT(Wv zj2DsM{Z*G1i2K6sYr|ax*c3rw<^U100rhZyJg5m0|J9Fgb={m!&-@4)0dUX@LLFwq z;@T}Dfw!f!9B80~2v~8pJrI-Q6xD;;dH$B2$RTat}pQLjsKK!1cKkdhu@E)%*t_mkv-c z{>11cW%S!C%`-8JAnaIQTy;CD@#dcr7$UfT3kY264~{i1Ks7i~gnx+?@VhK293t+a zRWAf#z>^sAy(e^Cp(lcn=k(Y73m3}3a?mq44(h(N`s&;e})Po!pcJ zTq$spkH0*U%?t!zxNvYN#)Cjayd;>xahWJ;* z+~Dh&+vyb`)KFyZwf-m6_zZ}8-dt=*1W14vRZ6)C5b0+TOa(aq_5yJGeK>;{5&r?g zEj~Y&h6;5ToFc4J8G4b0{iP6jRCpKi*)Cn?-UupSz4D&dq)CIHM$sNF^Vv1jjSU>L zL>4L1S9fM#PC7lg`VAs@be~Btds+bYw9psBvTo?wrayDQ0(f%0kNBH^mqmhcX@0Q7 zm=y&C8AQnrUr4Q?5+51?Yb-FayMNC27xNR?G)mGziX*l=kxLC|#xc8-`H}_n#0uu~ zRdS%A_D4b&fYoLJC5Y!I`}@R^UN}I6QogfCASnGM@)!>81L^wlDIrZ(d12LfuXc~m z;@2njsszA_IwxGQyh&!;cQSM}0eFG?gBoJM1N`q>tDC==UlRpjrLzrm$Y(>GxsFv( z3u=xAyn_y=mAJoGfd8LD>xT0PEmBwsnlhE-0%jygq+t2ev}LRVhtcDG_s6Dqyb}Bq zY?=%*Ms3**h7$fu72VeB0RRV#4!({Ipj8p2h|%54^C3CJyf)@I=dJ}*M4AM8GwS=# zq41OyJZemTOl6cXGgy)?s*aN#D5V;= zRHR`JAo>U(9p^*^$|k}4_!k&AI-d>h-zR|u41ATAhXbpfG=PmrM}_}4$^!rQP=Nb! zAg9S0(}EY}V?R?#p8NZxK9`l@m9UnSDRC)NnwyI;Ix^4U*5x-NQg8@6>AZgnRHVR! zfA%R*5fl7jhY(Sb;m-l2|2&5`7I;8W=9+{Q@O%b2i#(?HnKUfKIR~_Mh;z;WrV%}a z_Erc3eDJZ|NmAHavLW+E_fyq*^TTl-xS<5Qv2#zYWVX9%eO^g01H2~J%`ErV+I<2h z&~D5~>z`$cqyTrrM(FN`69b0zzd?DVFa@-HQsp*^WDrOn7#?G&4a|fqy1X@#MT!6d zx6%uQ-{w|;;>e@R{lAQM%R?~EMPSYU8Fvr#K+h0T2VDYS4q%59eMtXWCNP6mz&oVF z;~GT?qe3DgT2n9wag?=JkL~qC_}nZ6)l+f`wO9MD+5ng(g!mSA#m)LZWXBdfK$f}x zB~YnbTnd=K_Y=ZOm+He*dq=JealW7Ilv(~W4hrvNC^pJLa27zR0cE3fJ{@L2V1fv1 zh0~_lHSK2j(%POM_U_q@$!sDOrn_Aom(llJH?UXb4X6EkP;Ij}_toG4Riuh1-G6!7 zSlkddb|{l1QWhB4w{Y*R|8u_z=zfJYvyV}b{e6-VbRgE>eQgRbV_mv*OuVbkLuhYC zF#*$TrkAM^R6Fk9f`z)LLPE6rHu77kV8z20Z+K94!oSr8sYRyEW*;aD^!f!utd#p? 
z+`5}IVZ9D}zhe{)if5NS|1<(vO8OJe)?)zRhNz-k_+$)NeFpeN8KK`Jb>MPBU;Pj z90A9)pzB0_11!yE!^uz#K$8QXZ0m?3fWBb#U9vHaPG~ zthquw7+b0IokTfN@e+BO$5rm#1qbivJY!!JZ$2KX$R8IXzn73s$WWHaRlh4zW_YZ8 z90gVWy=e>p&j2SMuP6}w7Q`Nat2cPJ=(?H1_M-pkoTCe#sha)mZLhccwc$4A{Km+g zn9aLjDjTKXw&?Ad_sS0(mWLM0-202)2-)^aXN5?$zVoLA;1)`yqlRn5(&a7J<;jKn zn`JcF3?&KeGq8iiuOv|Wbb6F0QR|CuoT*yO+s_t@nPQ%IwDlfKtk(|vqg%R<7*(ze zdSlomY!8<`D;@447ljTYsRJXVdY>55>bBx6*iAFLZj3CKxz1x;u1{-6|H-^js?DHSo^StGOg z!tGu5FWbSeAt#*_k6ibrFR@(>juKYfg45z&A0K*)&DFOYlxg{$2wsy6^GS^xbL;JNK~#v}qTNQtIJpZEo6h-=nZW!#+4e~iBhAz)*T z5I9c+C@AGF!H4#MhcTc+rco5`pxEr;?z|rUf+SS<)de_ww-2zZ+K0Xgfy!mR;JH(W z%kSeEEOUq}cXe?KYU*G z_ab1HcnP{++0}v+2d99tF+rq($9`9ZXRn~#4=$!-2Atj0?c>@jrEw~>is0xOL_D2s zhGxAE>dI%v7znDbXjUX3KpZ6Z)le0W9JOQPcNm6?%XW$*Aal}fu~T;R#4#l}50Dze zLD@9_aS!#CYwJ>@D~msc7qO5>!$iLNa5VV=tj7;;a_Yw@aETy{Ck!mA0^qtv9Q`qw zKY189TT+aIzoa=0RGPFQ_y+Av?hXkC40RFCzLGvvMpkA zYbYgOofG7Ggq)_aniilnNBJ1Z{>n9C5PUeo(WNv!?YV!_YozL4`pEGE-_M+8bWnAE z>8TrmpJR+Tws;A^)NmTcCknOHLA@LI%=Mf2T#(&Ef1juxb|E6~qzi?pj|uMC1Un~l z>^zj85MkYV8yuFpk9+gUd-io5{8T+f`uj7d>=Sx}J7Z4B?4_=$Tt_FIk1y-{IYrh) zWL+=O@FGL!Uc{FdHGs-g#4V41Q*lzcxS(!{lE>whtE@KncZXhQ8_fu_=dUG4OKi{4 zQ4xQMWAmkE4Yv-Tg?^me(z~NNa%ZbDx!gW(UH`~C)H;@<9vk5PMkaV18^?WK^7tUi z(_lR?tuIhvaLiBWnmH#_je#*|B9HE@o@*Q-q|`IE>OJr|ck3LRf%X~XghY7w+_jO< zf6R*JX}hF;(4~7k`?PwN3ioE`WU%aaQtffS`jjidu=nw{YFwzLc>#*V`{nwl-lm9l zUpy=1ul56ylU#epI#!mN_MNhL2G1sG59!7z!j})oTV4vI#Ac5 z(RnkW%&L!M_ICZ8-Bqn&#V&WgG#GcfBl{@RywskbnJR9TA0=wl4W%@wV!QjY%%1N({B1&Ap3p?50Ux|MDDth=4DuryC22)phmHY&`}$V@RRvpByh(JenXXIJl5# z37R09Uc&e=gGh8HxMitgBdAOvT6PQ=4Jif@$$dO-ie%rKI zw6Iv6pKp4V%InBNx_nxwkik9N@lBm0E5m&AnCMHP+p~6(;Qh}cIxoMUy+>0#OdziQ z?w<5giRDwoB@=!Q(XX35Ov^EURuUm%JJ&2?m5g_leMs7Zi9Xw(lE0F1z9E)Cn`V%$ zkmB{ z(<5z%mam=9jZg6FWh(|k30`2Z>@5WA$R*F@4E%`8|8Uh6E@(XD9pF=x!v&O45Xcz+wuiY>`^SIec!8ad5LSU)O(K_usQ5(Y@ATGs;)I05vQ z6;O(GL%(z(bkloK?_*go)envt57O&_`IegGm8B0d93#za6GT+pbbxnXZ~g*wLOnNR zI{~0*-vCGQGf`l1(5DAg4`A?Nc3nZIg0PcUU%0{n5^v`BnogsH93~V5C`!{r%!*hq 
z(*Y=w>tX~8^8mEI6;66)_^cn}$-@BVWHcC&$3r}PuM{8N!29bZTTT^#GjQss^^RIj z>^f^7kkO}8JWvgj1W6bd47px{%uhWS`y$fnfPC*CW-W|TeTb;htuq^Nh=tcAUBUyC zGyGUmCBZOX1d@@b0uLH5$VhoNp1OVA{|1K}297nS5GhdDqtjOPG|Dl%rq;YrW0{j5-&w|xxaCy+O zaiT(Ijj%{=u;lvVq63keYdHpCkq?Gz9l;Yd*6+nc03MbNibQ`{`fR*?JVQUfpOkLW ztn%jyJGa!pc))=CvRT!4d!)8eM*%}kCiiI#USs^(!~CD=W1jpD@$nZaCa4eAAJVvS zqNtKjjNvCEx2%57IQcPSq$@`LQ8FO^)a+$(^9AVmO{Sfmp8JM`c3-P%G6(YkJT8Kl zne+0F9Tniq*=`nTh6#RG=hfq>hvQx@ON~KHpMp24c=Cn6eNQEKxVx&r%e8q%>I*${ zxv6MYPTvu}>d#RMh84;ux5QpP%WkFqRVb0^DKP9BCc?@xvAkEQStCuO-7dq5!h-p> zO*alt@Uzj&b+M`r)(MMbp;8}m3-0|14sXl99?pn)O$J|eKzi9lb@c+j zWva#Y8`{Rv)_&-3wLYzm;&eA=L8Gw(egMuZPl%wq0XGLZ}Rwih?1$!*3JoX zQLd?5ve4!~Rk6|7$T}2Ae8le|DtT*%>Q`peaw%FE!SxR4^S0TGT>I5GHCZ2|&qZr< zCl8AR?f)=6?%Hcq?xJT=qyfj9J(;g@Nq;A@z1(z(b5=O7<9+J`i|Vst;C3xiah?C& zYuiMql+Cawjzy10lY-96hl9)VE1B(fuW}@*V##%K?Q`20p(zpm=GMC>iMbZ=bV3ZpzyoW zoe_Og0ez}PJThN6>0Zb3@yLLOKN!&l<^}Z4esR0g70#5?D=I%{DdWWJiG8ZagNux& zSOVyj&&_Ap!J(+%6YvrVDbG%xdER+&>joWQ0XRf*F$RnzCDeS4z$H` z6l`;GgYn~qhx>>%py%D<-q1j(i1nZnY-+hI>DuUv3q9T*R$zZz3=WjE3T2*$jUjSq zT_>w;aVJM0&o`U+uK&|Y(hcm>=NEF_Nd2pb?}G=Stj%z>U9dRm24A8t`xDsc z3y6L2N?z@m3rD~yve)e~c%eg^@GZ%SnOZ22r{0>|v^Gk^a#*XV#P&PU;$d$bfhOmI zmS)gF8DnEvWWI@niYz_d8~C9C4_ZndzPu#wkVL*20$XfIXt9gXYe0e$4f7!y&WgfA zYpt4n0n;}{={lZq_!w}g{xcgQJdGno0ase$Qu?rPbdSvQz5qHzzzmw$o%O{&CApzRs|!O zME|_1@8I8Ho@u^lVDw!$Df4!jZ59X@m$jyE3EU`|*) zzsmmM+g-h%(A!n|w)*PSsA(l!OE6Bt;I_xVLbpUwMnp*VY+noaBDcow?hB3}#S~53 zr261$A>)4eU6D!ivS;*GH8Jg%r=Lu03^OxLV#bSw7BdbIyy7^Fnv_|E@bC+QTBXd9 zP*Z1IBGoRw?0gHEc()+Uu<+_VdX3B4*FN}mg-S{m#-ly9{O38|a^DEXTk-f*IQeaB zj3TI0Fa19%jIpU(<~4NW>SpJOy$LeDicR^>vx~Z%RXO7Fvoks*c-^@CQ9j}Ac37&007Ua+R z`kby6{Wm7NuR_ncEdKDFW&_(@%qsfu*6xUR(1de9@=q^EqdZ%dtcb?PU6`S-X=*v> z63Y+IDg@8Ov^o6V=<0xj=ubaTsR!Ro*sT~Vv2-1c?HuGPcdpD_8+2O1As#GT`Cm;R z2qz4=%vSlrqOuad2#gOAV!iAm$Pj0v&+7Nlwwm5$;SYU-zVnJFq~RdO2oL3jqSgKs z!!EyV{P4~@Y=+;e2dT09l(V1tO|rLGeBtJMkJW@**9kn+``{m3RUl5aVDprXk6W*e zsb6D`;CY+HM60^R=pJhFgy7}^=%h9QqO$;N0ik1=z@#aztz5C+_)(ZKq4wxaD>;~x+!d_6) 
zP{;Z>(RlFl=6c;Z=IS(I7#=CFo#JMJuyb#WYi_f?xc4fag^aiCT2K%&`9;_7-(TcM zr2apy-a8u3HfsC6#u&Z#P9ma28ND-7NJNX0=qAyl2hkY>(Sk&c9z<^;j6S0iH3ZR1 z^xiw~bwAJhu5YdH|6$FVnQNc>JofK6_PaW_b*d)5HKsfOl`LYZ$|x@1>K8{ARji#y zehK6Z3jB zRoWqY9v2Wxo&*UH;EBCT zvjh5)fo9e9={nJlXy#YS?1eeti)6nuJFpiLzZY6DD}~VWFr4i-Ag~yj&s-J5*%m#Y zMc$%U+Cp)f$%2a&0O;~};u2PP=i#}wcos~H-cH`+=2;e!XR`oARH>)o&#_Gq=LC+x zE&`$)foaOaXi=nqJ($ZwvJoeSxDz116lzU6RBnBQ+H;<$kkQmKc|2 z6dMvAa@l_{!$kE-yDYuw@$-6h?fF979x9zS-gI2iuZO!LW+&+@Z8u3f*ZjIgChFdN zH=+k_ZgOwg1vbU?Iz(ywV|v{RN*6^cEDuB`#RGz)v`-FTJekh44`yheC$(>?24D@03WM7K9#_^$L!N^M?ydA-}qoT45;%BzCXHAIDlW_mNAmvwA6$7X$kJ#a(qLk*8ew-Cs4}*MUkqui0t%&r`dEvYR4n z3q*iZ$iMtw6K_q{ccL@(-nnDZ;U~G%E$*p_#~BY5Sx$n?6qy*GmE$h#Jdl9GZoT*L z)yrm7v?S1Fs|pq9k`hW z+-1oJlgaSl+x@aEzvneYBCym~xh0m{(fKXc?=+7RgszS0zfscjti4!Xx+rl;T(AvACUM;j+w%U-h70J&}d?fp#HFtJSCuzVmcaYiRduG`z zymw(%+2QMRm&{N38wRje7)_)5DcNyGZEWK9Dp z90`wy!9?UiwL|$^GFz?8&vvQ1dMj<1L!6JNFNuVdNqPbZFHIiL-6){ym5w*GL*(k# zdv33~3cSpKgL*>c_Kwz{?)z%SW&f1>yqQtN1|SF5r1|pm+%Bjo-WNP%mns1 zHZUm71cp)YWk_&zKmO8I`atq#eapo8R{Gt)r+TD711S6yp-Ivd{3h`kY*tj2C7rJX zuhfm|coMfaRM29Jj!T#WOu1%am8#L>~kCsGi)u zjtppOWWKc)qbREkiV+e7Ehb)xF>MCI_nW(?X*dLxI!!?VsZR z9lz2w#?x?6eEht@$DjUA>B`5asu_2p`^YZP#wa~5Y6jq~`pOa99lucnU9EtgG#`RU zoh%v5JavP+XUM*C8b*Je(QCA^(CamnDUbMtNw<5mq)qzbH2a^OR_HrDA-c zMJ)59Q}ktHEI;L+)Y)M-HKiv2<2Y&hZkuX5vs+*gn}b~0p!aIu&uY5?@8hS;eQRoi z+&(WlEaxw;^KB~u;pecNx|65Oa_{@b{8yq((&l{9 z65sEaSsIozJ-+q&?W?}wGahK}7*IJ*i9m@m`$$I6yron!=>4}}d^D2BEY{>7kibSp z-=1B@_^`CxF%$+~(_&||G4t&M!M2wX>P5y4`+hv;86M!rN*A51@!&hZ*f-}vV#y|9 zftG3Xld4P2Ecwmalng!!E$5y!gQMkkucDoK@e!V9?*ir*hiC9@v~o0xj5jG~*3yGa z--stf4Vqpv4P*d^rrnPjs#gskcD8qC7kOyLsxfVKYxVaKrlrIWO8{01p| z*;g59mqs(j1Y2tT1}8;vR?(LU4EnWXdgqwi)A`*Xjxy)fGF)ZE)BUa8A+*ÐsIY zGoa){9V5EZC46x-Tl>SaXE6`{1*JF6sk&9<&b~aKuCplsfOie?_UP-Q^`nlOZDpF0 zpi7l#Esm!K%8H>RG#{RutiKhARaye2!)|NYQ@vlpL4lUnn7#%Ci^UQMA9FWE~R(sw{^YBkDhG_m_u@r9x3F^B;!dUx7wRN z6fx(U!%1c)XsM#ef^3c0FF{RN9i+kFeqx@XzJ4O$swo_X0gbm~CC@;3P&{I7GeOJv 
zR@x8W8M2_S0&2)Hi(RKo$Vyw>RF%HR*43t9U^BT+l0kaQ15!VNYtGSQ+A>MwLwESK zxc+rkDROSmIhxkj-u1`(cKL-`oO{XOac6Tt?tttgORSUr&Iqf#X}!MEnV--b^kRj)K&$BaEUPyYU|0;JTIz(gliffPBvjbfugKuTyKzCsm!l%v zgLD4*FP3GBu5kMKKKd^?VID!!*LlAMn0s%hOX`~74w1h)N_l4O!M6Vlf0M$4YaItB zzu{$^c|0m}9WT34tI(&>F*UyDH7jzUDEx|4XyM!>w3?hLG27z-ee7R<75eU_d@4@t zHus9R%VlJpDckr>P{ZE_q^O^~d!CCqlCM&S`MoN@UF^c5K=h4pOReB^dt66CrJyh3kb9kbojH+3tk(y@YGD z%#-Rps@MDP7Kqp>SDB7&%`UcQ%M-!eUZNTSgUqs9=Z=C~|CEPEE@r#Ctg}4+=HtH% z4Y~9jMo_9_;`phB5ARYCY3-5k%h&v!*!LH7fygvGuou#P2qXF?fL=~OHa;FyRN3@C z&U!p}PMs1(ujUPf<*?zFp_1O8+`Nn(r|zq)1PJ}PFhQ~Wh1$cP1=H6Q?adjTf-|X4 z{XP45GFagxS(cG5)Vg(0oC>3tln?${&znw4sXK@F<>};WD9buib9I1i z1wza6ofgA1cX0ZU>ik_M#B+jaNwY@J;i3XF8Dlx8iHbnXw~szS^Eb|O6sUVx{z777 z#-PAeFo95f+Fof6xz&>*%56EAkq(Z*)OtR1jTE1^?>B;;bX6dWO~$Po&|aK22#MKI zOA&dIA?HOh-kWAXRT8(xPI2R_2qwCya^Vp%Od^fv`BR3aV+KR;K9+zX2FE59QrMrT zU?&-M8T7z$4eyleSQ`w&ZD|hG$!fu>OWb?*`Ch1xv>bcI^Po~(2Y?}HHC@0+A}8%C z%%@!z`YO`%K9JYD3K@?C!kyE*y`^v{ra`QolhBXek>S`wDUrO(RmEs+33T{nt&ik0 zBaPSJF!LKA`*DnHZ?0xz!W%qxCG>@y2#Jhu6ma4rJz(a&&VgFwe}?#WN96072*U8v zf^y-nj&^s?ZOn3?QnsGG=8}EC#nIQ3yo-zqo4@uto$-ivX#f#srNGDvLBKg+AWgax z%#qnfCbcnKfxPC|0r!J?0FE(S1dlVksdScr*cf5rGRpmoH^Bh@TK7dZirIqs;A6}* z!>e!LW`r6G)TY!UHVGLT0mD%s%7r7nguF(fgp>oEMZm{YJlT@%DzG?*4r@Kj7;1)uSnf@do!zE4fEG z9GBx&)|wl`Ibm@@t5Ggf8nNUUyfI@WtJp|W$)Kdce2RdVq>^s%8|eA{QLs+AD?1jx#?0{LUng5*m%$GU;S1`{1aPQ*KH6nS-jWU) z=&hqDyC1WbRNa}VpixtV6GaC2T^tDyM9-O>^#e7hpr~3Ht*angFhZjRG=|0zhO z5*&snAl32c*J8mDZozRbzfjk+_R5tm^hfEX&GHtB zlehj{LnxJ`R(x-t2=RDQmRZ5p?(^xGLLzmNFoV~hTor35=ymW!8C}Fqo3JK z>EbmL$zMA9TQW8*y~=6M@-uT3kN9t7Bc3rkQ-)s9Fu_ZzuY zc1j639xhR9|L$FKfEFPO?DXNH(gq`IUK*S;C+4O7X>1|Z3ss;0JuE7gf9$OHZJ}E5 z=uJeDt}%I^jZAD=#zDoTHE&8-#LvRAh%`D56ZF5t-)0;cd3j5gKn>vG?`vph`KjCxm&E&NtH22!50T>&iL{9MDXY&M%lG- zUj1dC$l22HVRcsYIK?ZEclfHwcf&2ur3FHVe})apnTH$e-uvxge|~-AB=-HGb>_vc z-ev4>A~1@;$m=qZn1&{+x9iLxbILW!F{9z(_@LSMBUsgojnml!uN4s-ims2^vo3I_ 
z>)-J?MaQQU^xd4|tMc{N?Zw3Mpss7e8FPvbYB2cYL6;sSgPhi{IXEvs=p6@I3ZHNcNbER4@O;Zw8kH52__pWtjlhq4NJMGw8uYOmA@AIMabDo+;Ki>N+ z+Apgkv!qkeC!o$n?$?wgn9+z(OjA&)drp_t=jEDgN^dMa9#C7-Tg+k*qdIq(NflQ9{61pRZRr`(Y=$3+EMjTtGchNJyG13+i@0Ny^-Bt zUxxYRR{o;DB-vq7e&0{Qyo#4N(FV(vnSj`Ne?>N5Oar{kq<{k(Qkj5hI#_2H8gU(z zQzr~@*tUH=LVE8G1U4*1@)F)_+~l30ov-1rq5RAg7oh?VeTEHlvh{=|7pOB2s@$7# zFX!L=*X>aQwcsLs4!6Qo0MLk(h2b7)3j^PFoMdA5{W~v}J*L@rsBvN+fq*+n8_Ob^ zjlmOb#Fy&|m5ky7O0j!Tjx;g0>@-#_iq38p=|*UD!gK3tXkc6fGoK47`~;s9n2Bau zzE&z9hXCYR#Dc&Z0Le#cE~gpl;?M8|3fhq{boXayGiSk?Lnv1bSx-XQxLIwt5wdhfVmX}D|GZ5?} zbq+WE)U_`+#AZDme-0m8lzv|4b6Q4@(ElE`RF@+F==UZH6h|%p@Vh2J!J-C{+Bj!; zz;oF^LG;a5Z@pI;-VGUPJFXT*s6&(_U^qe8slo6TVpU0ndIdTKU&|)u2qF+b#d^>M zpzQ+BFjek^$U)D-nb+f1ai=+^L}YQ?0`0IeT(QWTP)9d7-Wcw?9RPr$@Bk|}2&&tWLBl30`{H~niY1X-AIeWFZg8HGgAQ7NOjU^uMJ|)fR`LPqk?7qnUHSfb zib!4w(c*nq#+Y+ z*i0*OJaZn5Hem<(Qs{Eu^!Be?9UC#biuMuhk}&~>t;g~Nr`Y46 z+-KBF9u?D9w(m0m&7eB^rI7SJ;R>JAowrpVOw!j@>JZqqw}L0*O9&;@8j%>!b6%1B zJUeS4!bHU2il7aaj0bH#AQSO*#h@)1R@Q%TrzYZZf5PDF-`|&kbt`*MRpW$99auA@ z%eSA_JJ09F#VfSHN!%PTRoATzbS3!$Uz&B-x5voxx!dvGeMMPys}oeqCvviZd`rE3 zEgW!w>bdQb#c_f3U(i0)8^$k>RFK*>AH6G5Xh==H{voNOy)EpZx4#(_=gQlPGmE}+o?MF@IIa=;aK@b zRQA&ls9bweer?r_nXH$klXl ztPVKe);&_X){ZxZm&{7}q*#d`pK#OjJUv1y+GXN9QPz3xkqEq1e7T&s{xa!zbLO*- zV-)efVe!Qi>VRlZ9gr>f6G9@tvqWV5Kwzu}ugT*=5l*W7jocU zDC*rG1@MW2uob%UN(x6?I%4(eb6H@~F>Tui7sLkylNEVZAiA9)Fs%K!hW$`K2<_%J z5R!pMh3Z6Z82KoIum!;0adt?Zx4iI%i!SEaD_E3SO0QnA5 z#g40ngH#It__1R8$!(;T90N9Yo>LYefLlGyc>t25tay^}`o|tB3JOte0G@LK4MY6v z_ywE_nyAd*YvfCG6I>Yd9a0ChGD9O86@sHo;LC$=gNUHc;s9_#tRYU1w5fIe4Hps{ z_$iO0Omv_}?wUZda1x^?wX-wD6Uo45QKwq4vbcPoR~G z=>>P^tkwYMAZuS76n1>DRt1D&K6b$H9uqRft;=X>ecx1pBXHJ;Aj!B@oU(m-b@40}L~ z_;izj^aqG}^^M$yQ(`R?P@&1iZje28*SfSz3xzS9X!N+tNup3NG@D0S7HJKVpIQ=+iL1jP_Xht@|k}orH zi9*d;|1^5JLhW%`5!_T~)_K(Mg9CStiCqpUoOt|BeqMdW8i!4Bq*Ba~q{RKFkO!P| zm^&lepN8ZSxyD0b!`Anv?hHZ+^1*4PTn|B*dGq@N!8957UOFCzvQyFc%GRDqO4Y|+ z_B(OAX}Tg~jud_+9iBnLn)t*){otM`;|kMG?+*KDyoxwq59z0&<4FrzsGuLD*F0D$ 
zVHi!jq1kw$Kk3KRQa!MwC#P#_wpoey7&@-J*WZow_G_2c8X}qJ2{@gXtS~S`73=k- z7=sc=sJT7nSU(WA)ZeGuC)PDTDr|GjqOtWWhw)%A^j}aD{goi@;la|q_jAF{?8C#* z3nE0v`#D#5OOHCA6~{m4jUn)V=%iK_mny>M1C~)K@uf%j@gh^q8!KrbK_M`~f%5zn zRS(h>LTu9Nu6M(VU7s9XB>!ISDOn7}A(n}zC1IUo?3nFKO5k&|voW&lGKN^BZ9qi6 zAvd|8YwN2^U{a;2Bs#j@sg?-1hfuu{a)aRQcW^pPTlXngki9 zKYs-VE&TjJvEG>ZWT9mU|8nq~z2(7gyApC47}4zQxoI zA+>_0dFR{8#N!o;%(iRg{q#rKQksf+y|~o+itp}hn>F|I6hUwMHUg?Fh#i<3^eunR z!Bs;#VuZ1?(ocoS?&kd85eDDk{D8cnu$Cv_*?s?Glwr=)(64p*IWAqLF9>@WQO4x^ zk1tPBvUj8?_I4)=pNr*pXzAVL2s`jRGYsG;}TTM1vs=9V&8P7e;1?85GDJe zw5R??LKBU5Ed?%{%_Rl3F_m>=6Q(0Hu>-))*k-85a4c^~dv~MM) zx!Rv5A2nNBJP{b&<(-}@PQDh01swvgSel(+a><5$aUj1eQs?lY{$X`wx>EGqKHHTj)%^rkSa9e65 zd_EwI;ak^fESe;`zgG)^?eEi-h-f1{@v@#0CBt54h0X{2fH@YV(ENmK7Isf82A~KM z1n9qABVi7}KMH;ZGWGxn0f5Hgr=7;&BiN#Z(|8}6;zvosS=uZqqWmZ-9${>7PbkO+ zzPOis(m(~JVe#gJ5o%HZv9pI<+Ka&r@1rBogv@}_%r)|h3KM2vKgq*%!xCi}%H|Jf zJKv#bft|%r1o~kpr5+I~gQ_500;!cW$iDT!`2n6emt_6YUkJJXt~ex-bbW;?{3Y1VnJ>I01M~nXq}GQ`DON2a zlhz7C6dy*&hTJ=F>g|mwPE?OUCk_PGc04_-2=ISi!rNiQ&{UKF|8TJl;hQ1DnnVFL z7ic#yy#uvVY$3Zd{0B4lKj(+^8R01oKm%MdFblK2DFYh5xIrUC80h!NpdTN=qH2Kb zJr{_%$Yc_UCBv@;nS_{SK zMs|av9E|j2it6%(;v{=xv%opC;Ga=2b~H+zLkCJ9^Wm?gXTGtOitWs2M;!;5C zv7!(o_6d*kWNTfTTg(das}KPC#sq>Svz`yM)9QOofquViM`PcPsqZ1FcG9J|t*C*B zW|J9zK9^cd`Qz?T-x01{KwBcbh4X*Z=2uS`aNgVQm{Kp1 zORRRg5Dtg52c=Ja5|6^NnSL&Z)*JSr?*$ekiO^lnCz+Ky^PI-??xH;q7HF zY#M?3J!E6B5CN6>G4@$K!WH)B_r(Q~jK=1Cr zc0(*u{*B*yfbjClj}E zX*v%_+AX9KAJr@X1XYdN!R;tOv5bHzLAdB$Ao-H_kzQCC!U6&gaZ2UIoCR5WhqKQ!^c==FP zn(}8%^Om?e?YUdOmiJu^7TOk&8R~ZseOL**%bD)1lq%*xbfH%<%rbXTibHv4H~k<; zz2XTME>smf&e#m9QpdDQi2ZMZ+gz}hxewO|3jc1$LWrIzkR4#{`Q5DPiqf*J-60c zIpX%b;vU=N4XK0K!a3_FJIK?DKNW)UcV0QGy#s%xr91M*2&_O@jJAB>LhSE)d&UU< z)yR%GFDZYY3o*ToT~>0>3t#1BH>ifeW z_q@l;&AH!k+Lay6#5DgXg-)kyWvO#y!>Ms|^lxU(3y%KnIo6z*P!hfT8>=&l4QUTZ zsplG2&9VYOghs=V2hRelh?H;WvcQx*IMP}9&djxdg6i{^oW5cbq90OcC=?u@YbE!E zDM19G{kZHH>57=7KuR|LfWQyIOkM_C+Pk2zA2S@g8Ya5cj_IHAgdp*=($m+XqO8=y 
z-XF)&lW4^<7ii(n{{p?4ch)%JI}i)L=%+Hw_(0-kRu${Q&!p=JnCMCxqI1?cUB&dv zpBMN&s%TB{E;Dbv5!vuQ_`>`1|HHOA^2} zO8&k-q;3tI$S5GWM#E6UbgqFk1G>e7EiYZ0O6)w`F32W+E?cmM)ZzJ&Rq@h3^*BhL9sg?v>sD%L3rF zFB*oGxHuc|!Tgkh2;=BMNyto4bJI2IS=Kxlz36vQAt$_YFRDk|bB=8M^3GI^ z>|(8e|B@;M-hc?i5!vt{!KJ|+qB#6aLne}Fdr&agf9BVvbaQn*wt9@%1vQdGgXqmb z|2FK5ut#>3Iu0?EhDjSvl|%t*Xl-Z)Ayg={w1|gDPo&$IXhG% zXmwilfoT@$cex8=ESs!1pVp6$J>he~S}Nnv6_v)83%P>91%08{GZtJ@igPvu7%_=A z?H|wxZ1T|CEyoc;t-bF^+f?21TJ5K0Z$k1-6)Zde2rRi zVXWm+jQDEISg5_*Su>{(Iz2QiJyiqv@H(*co*Ivb4PbPeUqMyVMLp#OHzh&_wr>V(B+U1Xb^m?o zz2Nm-Ts>jQu{3FF8B7(}d9=T%xUJL4PZcq&P!lBk1}wBU7+b@u6c4%i>#UojfmX}4 zNVy&{7Ng9c>-~5sAfjaV1Z?unBx76yq^sX@JFwJs-+q7GOWZ>>4ASIZSH#o)=vD9= zIsJ=E_@6Qy(CGcpe#>SVMkSs+oRXnwrD!#1D;!mpegOE-mTJc;*l10*EZBVi&sf6y z2sD=5kRq_ub1dKej}_atnQ&^GDFxp46s)`0PeDM(2+v64z4H4xn~KK!zn&*3kh*8e z%1t`9T{HP6Q-B4x0R)tgoI=Gcii4}g`P$kpirC&95}FbPn+oLap`+F9`*tmF;TzY) zS_$i4FE>Q&YHa<9Dj{dzI08hzI0WMj6^Bz5mM9$3|BgT2jFw`6l@m|(nk?k;#=4am zhA`0yP!;G`AF7laKY5fWsB`>_n17~1nN;_x_!_@pe@&P#;|a2WEJLw;@ns-6EWA56 zvEQL3ue)BDjOK$d9!7=`r#-x1fj|Fg+(X8d<&Fh{VE$>6U||A0Cm&t_; ziJ}3>r5@lb5J3@m=29d-&yb}J}j0s6}Xf{3EF*c1T)6h^%u+{Ur_l80AI7`nGs?p|MCYx4@dOPr#ltYO3LUa+S$PLcgKau(# zvij!(qxL6q*uIa^c<;;^cGh=sh$j2N%o{ct#DM*5?BHi7Q_<8UOX=dA8>5jTmfs-C z0?uJP2$NOn|DLksrar3xYw)Y_-~qm0Q>(kgjp7*5zyxG-BsKS^UNT&MU04pKGdKkS zpZLcA1jlAejiQlM|K`PO22*#0tLpWRk%D16notrm^}Y1ynb1%L|8mjGQSN@hcbmW6 zzv}1b^BtrCgnwA*o)P{>vs0WsDtQeO=*rgbS@)lniyu&{-n?g$`QcAq$|5zwO&(`W z)t>Kr+&lDq{qsm>%k{H4cDW~0R-WJmr4{5-)fpp`ovI%){)c4IUYon2f?AB5?_~UW zd*RqNi{i$6VAgz*$Gan{3F9xOPrmGdUo7kya7x>ykQdl&^8MF~pI&MbC3Tse^sW$l zk5q*>c3vnxF`E[WTTW;f98|iK%Koy02)y?`)eqa38$@9ok1lGiLY-AwCF8UG% z|8_&N>v2z!>*I$O|JyIhiTg}m&piqstt;%^&b)a^Z|h9zv%VlYcy}5`|CaH!-ssbD zEHZlKgrcnfx+P{U`oo**n9})A6DK7x{>P+#;uaY0vkY08 zbiVR8w_K_dN4Bfb@_5c~Nf!;(X4czR%(@p?+?KtkRTd4lqHEK%Lhx$T-kSJN_vay}7xou!hG>Tddu7(DeDz1i5j5Y&%~X|> zU>OcSEAWM8n;$K6Rur1Cs|svcFC?D7m?~HTzr4@?gL_>begDu0+TGF{3i|)?`sXYE z)$G7qtA(Av5MS1Y_kFtbj3h;pt-0lUhr*Pcf 
z#gx`qa?U1NPpW0Ng-(_KsrNaVuVP%6NHOg2Sm(A-(+uWF%snVn_%S}wB%>DkS2uEi z@YQRf#=W3=8b{PmC{gHPohzn{Z0H632^>`kTY^Nbu%JLou)BKQzKQ{K0hcIfTkS_& z>rd7R-0jEsET|OvUFuH&kt!iBf0sTPoMghMAMq=B>G(L%p4lteReI^`fC7!+9{GT+ zhd?*}8XE@(pvOUVX7jjRk(C8D{*%Xv%a?3pwy6QDW&g4U+{WBlkLLwMpt5__&Zf!d z@1T|`SpNlz0g(9!36Q|~$cMdxI&;ruWj9EFh#Y*8BiG3QRAL3Y1GOq4Jl%mmWQr@b z>40Dj035BAdP*uqe1^?!S5EmP)C+0B^?Ob^cf zE8zbIv%wT%yd{Yia``@!)}>Qie^I6RA``O_92EY2MWCRhQ@!ar86d*DaG(U6X!J5H zc|i5d>AAgB5txzB*PBU9mEO!=lnp>;^dvAlzP_WyuJd0QwPKTMeemJUfL8Y|qaA^L z-xn$n=SO$qUDx7DoQpe7>ziiGQ3@tp4%!ZqqaSJS7ln;CadkI&|vi5B|(;}s8LU9p>w#71anP;oZP?^o?`A!}IdGUpS+ zm)Tp(2j{u42>aB!uQid1f+1gD=8lWxQ+6ihlXSM0>bx4#BscNfe|LP}S6L|UnG*g> ze|5XC!BvC!jr1ekW;=OrRdb(U1uGRYJ&rPTtGq>3UDJBu+2Gk|5@_@RN!jZhove9H zMdKXlxHezoqaZFS`!B$k*?PBV4SVT%sEvBYf_u?)IbjE_V^vyrV-4r}VxW$4LM`!X z)^FCgQ!i_wL^u0WP{UEnptt0tkdm*J`WmY7a=bIw>k{dgE6F(b@3LR-=1@BDxNN+l z_?a+4L^Cb#0o_in>_nd;8DNiKME=A6EZ5F^)h^9PAwP7&2OCoz``A##G-4yFN58?2 zx!H4`Bz?wD7v=Y5NPfZp?5dV~y?38jp|@~i*IYH_T0K*Y%_UIIm^q{3|0hpa{yJNB zdpo66@3UkbWVG<{MqMp%E%|h%mN9Q)%lQ;rv0i`OF{OD#Awhy%LdAzxH4M8xYpk(J z8Hi!wN&dNYsaxUoB!g93N;k`_;Mirql9lJ|I{a|-=R48kG}W=v$9hwqElWitbZ5Jn z0gNOc=rz+t5h&@%m7LD&$=}p2nHG6+*v+Ie?J25b-`^@uDJp|aHgn~^e7GM!5Yfoh zta5D6)rb8D-6)zhJX3kUoi7^p=`!u-%$Y*`3)CHGPYw|{josQ|lS%$8k{15g@Azvi zYV4y^?kn7nkN!dN5sYw|e{@byj<>{Vrf{Cmw z7`lzO0bk~Gny3lec?uf7inxk)44#?=65#<)h$JT;KY8*33_r9?9Hb1T1EoZLSaC%9 z9r4&oN#oW1I>Z+Oe4^GFssTH05SP`fU&x|l<@xx&&PQnWz9b#q8{99)=*p0W=4!B> zZq}J|%LgO^5jBo8hNgx?Dxg=gx@bBm289QDx<=L3IE`YeI#r;=gy#3og% z>|izl3mP*c^_+EG#Y6~bPp(vWlM}&c&IR}Z9Utx@iMGJ41E8lP6Ura;Gmh5$H7G(D zqrQ0D(a*|itX%uQm9rQe3RlwEIS~2)QA#{c!CQ5{!LYevVEy@mYdEzs);XH=Vj30wpSLqHY1L!2>sJdg4};Cl3O!Tr z;ryW*#}M-v^ShikZY#uuH1;FkqZbsUKmxea0d?-Qzu18{B|7)CXP5FdnhQ^-ls2+} zfA`3TV#7rq_p8(SeFbCcF_HJJO1IefTDxC@gD|2=+pd1m5tI)2w)EvaC0Gp`ew9dP zilO$)(yx^u^*EJ>=eqt35Tp6gPN-TS%KMxoWG*L%`J~|A_bP9K@lWCF73eX&#jkb1 z)v2%cE%_Bs4wSfGpW4z$P)qCW-3yg`1Qxd4MSv+9x6!L`@$#1Z#&JCtaFZj5)8{w! 
zy)5@X&t|o~rDE3I(xL)Scwh)?*YESDuBdMYsw>%V97-f-vtxI>rQV&dM)WwJUL9Av zAG~v)ti>fLEkI{@vV7oce!@aal3a}Eb@>3HOq&0GzfSQF0dYG>n#{sd*X{QC?|?H} z_wly>JK+;m|BIjG_Vp?K2P3WDcbb-{-bgAe9$A_QHsyia(jWwh@$TreI9N< zU;NkXcbbx^Ce1eYaSn-eRqs}8Og0YenD}fr;8x|u^ReM-(^-#&b9(Bbp`Rn9HIJL* zViY3~*i;Y4k%qX;qF*I1I24a*`%u{4-Jr*KRHx52E6?`MiFIo{*#B5;{;=fvVQj!T z`+h@={X2%Fb*!o6Ik#eLR!IqzI3|i!eXx`Giuuu4txOQ4^0xI!FX<2dE~RL&X%uRn z)!5EgUwwvmCUhPtbdlThk&HiW|Jl9eU&%^Vsch!BJm}YLc+h*@gCAom;cG^fJkGDY z3+O+-{?zdOp%=qapE2znq1ZiF{I(K{F*g0rn09l^Y0+Y8)vw_5gTD9p2*+^16UbQ(wH?o#E0p4q&umIJcSlT6w-J4^YOp9L8bRg;`knzZu0RBt2bcv zT=i6NO8yxvwj*N5G%ieL-Ix3Vav#Em%kh++mE_&dv^1E)m0-$WfFAL{l)d2IEch># zYo4&0o`eVLO11H{(kSPR05oaPxD|rJsyJ5@b5x|yp9iNzi^gR8f|ag14+`6!4MBp} zd{4z&vaBq!icrBcqUp-#_y2*cKhrgNYS|VpbCvhen$m>*x94gA49iEfBKIJi7kq8s z=pcEGQN`>-w=84=v=aw;4$#;lxC~WOZuEy9t z6Aq(o^4S4_Tnwp_^NDON^pKUPj~|hxDfl^u@&p1d^2lIje|n>Euz2~uGUvp*2QJXS zrLa*0;~=64OFMTYa-cNray1@3R+C}_c51^vSIO#FXBIz53@kjSE#V#gw^}b(wzt*QNyS=%%CTG$s%z0C! zb}u<_}jPTc&6OJ=m1XZ@MN_3>-NVV`bmz8?}WcospDibY@0tobJi%>UGF<()~GfvrRu&5KN07>4%-D?TJ2-j{ky}q6WiQl zOsb);sE};Lw@p0YF z;h2!RbkWz__wZt79`%-tnAPJfYcpR#E%5~Qw|-(YsbKy=`%gjLFOG8;e}@b%SW*g? z;vuR53q+{AaxlQBsLqfzReNE>QdbUq3Il@iMi1^EEl&?M{k=w=Yd#kj%V; zX+ur?gX`4qk$ueX$|4(CY z85Kv=tqpcJ?(P8^C%C&i!Cithu0aC?2_D=D?iMsi2tk4qAh^4`1qdE|iuc|-Gk4ba zWBAjnfUc@jb!yi>dq1*sf2p&;+~QFZ^V(GFY#-jAVB6D$ruBO1qu9)Gryoqqk#F__ zgE+;Wh)F@z!H~Q9`E_8#0V%KUu~(&T%DUl#YBwMu5xxg#ZQo|&!63v50&wYSV4RJx zL|CVz5KM{RfB$%?w|N4++N*OAn|vdTV;YGV&Z)*+Dx0`zZ!GV0>m-4Ue54u!5j=|g z$=wR~sWnFdnh)h~5w_Dy<>k%~bVdDxxF6yP#akvbl6Kd{_&y{_>e<(?K1OrebqGjD zcsgo)^RJcYiY4R6jp?`UCLs()Ky_P3Mw&ki6DR&rlGWoP>a{}P!NJxuaxHXGq{~dChVlwU>b3o_4c@M3;m+dtS)86F!TvH7=7nb~XD} ze|$~mu7q<+8Nxpi8iP@Rx13Qu`=edO-%tX(JKg9II(bm&dlxz0{&3`CUY|G+hRx+C zd*CBk!S}R8-xL8ZrF{Ut^*hITFp6pmIBkxsV##a)GtOlRd6&1!;ZHF@Mwb(8PNX)v z;2kZ8bN;jX2{;Rvw!FLa5YD)c5YLuP@7B0>PxvN8wngbgfL2QkB40q1SlXuqXkb^a zC~OzEIGqlT2^-x37wKP1ybXSG80lL)gn&NVA}KUvMh`C{{cQ&HQ{_?@Q?FBhqw2y`93ys%6Hwz}ORhs$t)? 
zeziXOk@R(^hNeAcd@(-B)L_{b&GI)B0heGl%I91sl$-i7Iz@?rRRzTirR`!%JJX_M zfS5@qN?-1|^p#vyP2{$CW7IAFOD%;PP1!*xuo~5qP+?Z{RO?KwB8z~A zh|#u!P(Biff8lzk`MlD5<=0v4`vA(xM-sr?u6oLx#=llMS?^#Epfl2?)Nfv~B|M3C zi2Ela{z=VmE$yvcPnoP7=`x}`+f5*;vvKR2C-S?FjwdA=rtf;4 z-LF#6G`sq)vyv^C)v=w5^=|S?-p|?hN+@0P;I0b1n9fM4{{5y1-xYOHz9B2WI${G?i4M<1!= zXQVzi`oGRNc8>w-l6&;za?AeaUpX2({vGYEO`5vw4f(2;atmXUz4<_8KwTl;6TmGx zm#}2o#OK%JRsAfLEPhymOM}Q)GWTuqIgCVi_@`in`VjJ)74h!W=qt6#xzPzPQ1 zDqt1V7nX+{whbu%UeHc)@E7L?lHa4AdvM^!j2}#vNT*tC$Z^-N>@*QJ;cj-K)+cwW zEXz}Rmve1YV>t?QQmLXZxAtk-*?$^AbsbJ#u9-R}=_~8gt2m=Ljj~H1F zTx5}(F@A%Xbw@<`hM_To-gq6KuU!GojvTn|K&5bHerRaG<^34V(OYssV1@R#YOv>+ zx04H7c&^72C~`Gr^A25X^egjQ@9ipQgET-9poujVOnme?_kQ7lpnjK*89;5he(1Z3 zdry#WRT`KHIKfwE+9D+1R=7R3wvs7dfm<66kEcpF@U%VxMp~=Cg!Y#cD3{U8QW3+zQuy`zlMRoK)V!u#~1r#>_0K`)_Ir9Y7o2K`~PX5 zbK?W@A|nqU{;-TV9w7XYVI#Knt0fz3Vs-HzQj2Kn@FMbn%?gA%i-v;EIwRb55eQi zpC&v10nd&=K+CS4zoml7&sU-lhNCjSe1zK_uA{)7=SJDoik792j<+*QZ1R4PKj1Ao2t#jOz6r%1|@n$?a)vIqh+;T!*zS*iAa5~sA*`6agb|#zn3!?zr^~ev@@_;o(E4oTlJw)>U)M zwo7Z&3Qn%cs&rFBM!EZW)RPE|vAY_ifN{L;D;P9GyPkig?W|toPpqFoxwefvoW0}6 z*`P~{v{PV-d))NWLkE_)|JG8xQISyTmGY7*;%%)={+d-RnO4ofNzDD-%;kOchT!qL zkQL)ywbiFJr?pbols3Mo^ENU~fUNh8<&=l!CFZ^KT3%uvTDVG)&QyEnJ5ny|BfqG; z@rT9;7Q7rlf{W$e)g#K*Ozc^{^l=}Au)!txf``pz`=;>j?%)uk$Y`|iwrl}5cIsFe z6;G=E*9P1&)F#wJv#4_3TN`V6_3w>dnrh zYIUBr%`TaC4V%s=px?k!_Rrf)>8t2_0b~47Y)wtHxA!q>hIpMZo^Bh9yiDVD^-PoH zZglqAt?5x&2=u}65 zKd2J|1F?AmV#Ks+Ha^)L zW)NPxNtvCEhgbRqT;13%=Idcj0%fWYkt#O0umBJU3X%m3=B6vktbCyP>wrcYN45ynQqrLWr60x}5ANV%blMA8@GM zq6P1FUZGch!JGvr$MF0CG$nd&m(7qL0Z(yskIiLIN4}e08F&sv^b-y)NMNLw782vZ zuxLEg;^@REe^Hm60WzL@$HP(e>t98wj9xQR5&afnMOmELE^m9%x zvv#B+=J_~Kb^p8wAc2uzS#%mBDRuSA`&j@C>V7XS)KK!}+FJ%BIXw z#V@}THfn_MKzmWmssdSCw{KEPV>|aO@_7GE^%qKZJDevG z07F8U7~k9;rOiYW zyQXQ3YqQ4T<^XJnQ3H9P4X%CMNKlpd?0t?xFbDS}(G>+@)K_P>CYb~KkL=!j`4@?& zpPIMghWw&?nV8pR4h~+j|6M|3W2?IDLK71}8EY?H)0$!k1YXnkq@~OBr2{F6)OS35 zz+-04to}Ft9?SMU0z0$jfGPm+d6;!d_Zrrh{<-JGad}=y>~*N@So$7cdA+-l!B(*} 
zvck4VaHW^Kk&kf9K{ToQRR`?zm@bt#fvvfiZW$sVLBYA$uj|qesIvt;ozZ`N^kZ8Q z2*_~3YUpby`Bk$77c^0-L6}tcfsN@r@)mJ+{;rC$9dSaa320Bh`=9po4;2GNfG_st zzVSreb))bh1${OUcUjQuqXU1n0&DZ5tbO-`K;w){3!rhCh~gLf=;kZLd$fMZNu63>K(P`ZbBH;!{$ol1|+xMs;FQC9I zmQ>x743|fHt2(^W@2?m})!RAS<8h!eSl!s@j8XPByuv?W?l<>6lDcX8mG+YU1_G*PsD*ZS!9w| z7e`@>fM`BhKEmg%7?g;kPOf6tjVi~7%nunZpK^$sTveMiLgT1BdZWRQnJ8iqz^G>i zowzj3eS81OIqzdGvL&R>v11i)F zlwKLfA|&MatGLnE4=g}$Yni0hHiY6u@@4?em_}cm&OQK{76UZjWEqS_1;MC?C*>Z9 zq>qMX>Dw&ofY<~*aUPW?unHCR$_^*{Inm##Y~I^aB<^d+G7}U$S7TcG3yi%i(8vt1 zfl>aGUV;&BUquO$f#@(9=T5Plv^P|BP}8fnjfVPFnbkj6^#|>P`Kp2D zT=0S-&>7bH11hzErL{XP2Puca)iJthvMRj0$q$`hcY)*B&)Uuri zXl2B%>E(}*W}C5$TjRH*(0=((ar4jno(n=2{q>6{q|9Uh)^s-yX#{K*NAL6HE7O-}9nR9}CGsSZxAF4kqY`Hb zG*Q{R{`TPSe!r;3^FvNgr}6#0;paMARf0W0oAh@BPFpQr{(44ExSgvWdqm}Ry4vfE zShJ(uPeVK@&wAE zY$fdJD~uJ5aJoaUQp9;2`V{W-^tIv{Pd!Al8=YPcUTC-uo&S#bDj~T{#y{I(ye-N! z`-3UzQFb2RnwMOPf7F=o93aH1Q2ub^I}vHRXO4~i8=QHp9|{~Kjb8hxy1K}+-|*5J zeA-mRrTm}zB6ri4tKTeEs8V}fN~^nEt`HrgO>8&4+Dh{OEe&T%`wi8K#Z0XK*RIW1 zeC*i3CAHy{-D@KEHj%i;om>!-YSpUr3U(iU46CjiprpxDG$E0aQ=?%pX!5!PNY<90 z%)bygFJ9YK+KxS_$U*obd9oKWo=zj5KcLl)&!P<3ttEYxd&A+BXE2i=Tu?>g60Cky z6blqn{al{;?*!72^aAf$rt(pB?2U=K0>61ZdXhfcnd(&MfQYT}C8U`bS}e&`jya{Q zyz)e|O~xFb-Wn3PVIvbVemLC|o@U7ddv*n;%x*=2$O%pxyEd2^E?}<2IZjUH{9wzhJ^N&nR^f1ue zA!Y+x8IRr-*0UX+)JX}x1Y!Vr@0P06h6y4A!6|I8qyh^_kinpT{Et7KLPECiJ7=%D z_EGBo)3x3b;mGB8m_?VA9!~eOq~MQn@8RxLv27tPlbu95>NNYC=zaGKEnevJUVS}M3B6=X^qaIL7zI;c0&;zY9?P(1!_JwjTP zdt%rM6tzQn+2H4>7Vcxv1c-Nl^Qhmf0IYA@AB?)!geWd z_0ZE5<@x_;SREVD14Px5FSba)r8qTelijlSiDh>gcw7AzPD!_`*O>lA%ze5+mW@)z zHP73yHw1;{m@iTFSiliq<)1xn1^))O50{BZM@1F};s`5K+kY_h%{DrIjt$j=Gqx`* zs@;g$OUFWZraYvkA&X)?lJRWKwya#DGqir|{PicRCEKAUtHazQ)e{L6Em{p%5`p>) zWY$wo6}i{u5?Vg2Ks>_Y`&(kWRb`8oBq4IKXd%w)!7vmAW?XV~rHmpHZ+apCu=IQ( z!W45hBN=p@NHZDpxw$@F_fJIu#MFP{m4wu&Ytn{4bd$SK|55B^e9#WCXYipf5L8-8io@IQZcly##<1}y*-Of-;k2Y`>`+iir>x0o zk#RRf*U8W&Lpm{f{zM-zt&}o!rJ*rsT$vsfPJ=y|3EsnM5L*P^H(gryahiE8CO>78 
zpN<*5%7dGB0^Rmz*{2HwiC{OnqgB)XQj}98wm1IGO7EiPG^+dtOw|-W62|^`z`*;v zO^*OHKML(@;ups=r2Auaa|1ZRS8uuEa(ySB$rxO;q%%EBdW2go@qllX;5!#{9U4CR zP6q~4VvxL9>JYXzVheNyWl&~xTVq2cB!J@1CM=?AWa=rwge=;Kz ziOo}dQMJJ16^*1F6Z-n=TaAL@43UgK;u4=+ZkP>jzZJic9cuC6FRm;6(dCauG*Rqv zq@6*EaISS4Z$BCEbo#7q$N?t*ZoAiVn>?UpyD(~8vG9>5T&vqgW%gGE{HIaI-x#X4 zMAap*&!}=R#$9wd*yctRH^;B;@x%yG1>yo(h8Jg<`CuHnHwNu4!s}J0qxMAq$ow%krg&P9PRodQztd2nwKi4twNE z;b=l!eH}bZTO4d_cP)ofM@pMcT2RMOS)4Cgx|vqt{4cuoKV`BhsBk>nF+7pT7Vh&(bcDVKBpoc+QRk39cs3 z{wwNuH2sUkA6F+#C>AoQREeI6-t`)!5nMUQ%*mO;DPoaKMfof?(KG37t!Llj3uU4v^&i|J?<(1z+1M2=K+7qAz!bb91eaPwe1%GjDHbXsjAII-g4MZq`6~WJgw(R6I zbIuIbwtuZe6gh(<1_NvcuHndUn_{nQZ%#}?>P(3CKpQ{KCXRisWs1ArZIWioUWGJ5O2{(@(c{7zI| zCyA84ar2rO^UoQOLg3VQ!<1g*uVFo1AdO6oo&3{*Q*Ol5_wefi)ed~w>yv|5(iDI2{4>Js_X z^ea9)>>S=K^Y`1XoxiJCzHf-u`u06Hc8bf@dZ=qK#leVJk>PzYJUc_`0(`>yx&PDw z(1uUZEmXl6)SMJfrGY~aPbRx8%Bxvc--$v=u;2rGehywM6W;r{1LrcV%|eu7f0M|v z=>-Te_%pd?11A{Krx&sJ(Q&HrP~_mkrU+=~-_^E0yap{U$(XL(_gd>Yz!y*|p$ooWt~-Vom_YN>$zZ_-N~*AvEW?gGw;5EZ zZc5h(g8ukWMZwQUrOXqZ3c)+PFH~ySJD64T1(WV`EN>L2==5CSFMo7O8C1)Qo(<<| z?yRp8JH?f3muV)P^D(j(f#Y9b0556!$626#Rvi1hw%QQ5q~7pM)co0@|FP*+nqN7i zY+4P?m0yWbrP_O@MolJ^m8mrKf(J^f*E3!HQ#U)Lc_EBcGAJeQ6R4X%Z>c(=JkB|P z->u3Jt-l#wsK3cwxHleiA04hb`ssfP*tK2Xjoim&Rv(ELoqd|FaAsiA*cA~yiQi(d zv-8BHszJZ1{u#4)c|^@F9XC*QlrY*7vF>V)K^RZuU=%E5qN`@0Jk&QfA^gck!@;as zmkzDF;k>)idNK^5^3qRl*<0?`=S;glM#$v;njcD7uXWxV1;fI^t z9&aSuJ8p~p@#>F06vB-?7vU=O`mAz{X(mC{0kvOE=qD9<91y3;-+50bR>^d!(T{w& zF(CA zL?vtA+vPk5y2YBE9Wb_b?IK^XBSpwB(Sc(3#A*pn}0#57rL z^!vBL#><~ZKF(*QKFwl{KS~fuZaYf+IxH9(h&j~N`W54BD{T|0!vrd$H+!Qrj|Wq( zit&Qc+}%5#5QjoTguDvlf-MUasoBPHL7UrikK$cQj za3$Cv0}6-_jMG2fy?w{6)wdBb;?*Xg zC}5CQvkdr0o*U_v{O^@hZ2lYFpBBSPGvlr$=(`XJA%A-i8xDQ2yw`=0olrLJFv*H0 z>F5Tb z4=P_~>673gxhTsq=FNyUDfM`F#mmLccdX;~ZSlv#jO+Jp%1RFmqZJDbNVj0be!ADR zxB6g+jh&NIbt~;}jBjD+{@)mpGhRHd4YYNxAb-pRvF0s4(y;Cih{62(MGom{(KN6> z>as3wkj^Fq5#_c)aVum9i_O|X_R7$qKh|@#6fHi?u1mx7rlW-KVu7byJ@<$k7Zw)6 
zET=>E1ao_uo?T5Vzz+N=j;Avhiz98=ag6&rk0i%NRIbtAJ!RSj-FIyrUK=3FSM5pg zu~M60dL1v8e9SKVvXz6tR&CYkgJ3ny%h^YaECgjXcLdv9{KP_zV8X#J7Sngtf;;hz zA@bAT;Ax^lEgbEOB_o@?kLb~_R4)iV&L(9pgf3ohK1qnG1`9dMw50XmAWWpbDeUH* z-T07(h(fGOUl8E25-q}&F76o}#<)MT3|bf_q`(Qk5x{=r@IP}6p5dO0Vk}E?9@+jl zeMH-oQPVmdZkR?eV3e|=CL>wtZIHq{*?fBgpkkH4WexTYx3W{FOP?yM^{Ryq zm4e_zr{9B8svnbLLbAEmfaXm{*^hbJCAGJ@yQIHAxLhoGPbX`CnY{J!_dY*<7(3`O zdrnFFOjq?~#>p+wKQ{T@+JnTY_-zZ-Nz_s`k!2UpVuPLVWDtxm9-4*rT^ zKGagFyL}Fa@;g0Sa~}_{P$z%i`gnR>66Fyi!zUfq!IqT47{Lbk(_|CI=N5R8&!!xj zd_*a+(*9z~?^Klc1fw_Sdi5`X?0@u7dGy!o?;$XWXb^ zk__pkPgg^ldgSEZ#4K+X`Y&*?hu?*3clHL4Oub#+pY1q_L&N2@`d{gZsf?VM9CBno zn!*OPS>%xdR+Pl6f6si4q9)(kS#|x`V_2Z zZ;hAT{UW6$|99`5q+LCYg+aUbf73*}?azIBS|Q9$H^|Yws#QzpA12HiL>Df$RPWTr zHo`6q^^9p(eeF>mZXz2t`0wP=f4?_w{N{#?Axo>cbUDHfi_dGi$!?#O zL8V&9c;#!2bEB4aV>Bky66&4fe<|&TmxAQKmnutp&etpJS4)ztXllG3W$;B9uX)%` zXXH=ioh8eBAAY>OI#OULg&!+A`it}3yvG17<~P<*uvbL&jl*c!bnQ3XkH*f+M6m z%oGG7EKrn@)czO9H6`{|bqram03DAqASOW%vED*55*dxlDgD>lvfH`?M5(-myM#>n+#V??3!GWeFjFA@1mK-eS)3rCFC3&K zI76+aK#xU@Duw@R8}7!lB3e*bV>RIUFpz*uUo;;fLoUS!wd;^QY*y9P=6 zp)P84WLN9Lptpt4LpTzmtSQ}4#f4mvZd6>pQ*E>ag=iUk8H$?AxE9#0MuSY0KqV+! 
z!k;(!S=-CsUeNJJ1uZ3S>=;Rq40N3r2;qT07XZ)(se+&9N4Pr3n9+<-(y*_TUqg@$ ze=cVtqzbd=uGG&nWEqP(_szF&m2fiafSI;ETX-T8Ez2+ibP4)F^2t?b%%X@S6+tJu zq~a4Pza`x7YTQ?wW*XS<3=JY>TXm54b|;xSVwC6$*8xjz-e8*iQ42^BG+9LsgGjUU}ll| z5bRArsg(}53#+QjL2Q(ftm@#n2~Q@!5d0f@B@n8Qi_q30p1odDtyM?3(&}CRu1-Nl zvMILzUPs`S)y@Ymu%eECf))LpyfnTHnaMDQrmp^F|Gj%t;0+$^bqHldu*qQ$hU6&l z2DtF8oQ|I=`kyy|xydbH70pvOY(kqo2w>~+5OSz{Z)qV;k->~MvQ5cXnw8rLO_mI(-HV%t z2THaJ`bBl2*iac6W;54=ZZc8>xRE0yRP4M-(Ao2Qm=k1By^pSEXdQa3<@yx z+wvVJ=Pu)@8~12V4-@rG)*Lm;y@J$!SlBH8S?`Co^i+M{V8ww$-0i4E7R#<^Pqgd*QrS}Q~lO@ji=#K zziz*B`GD4v?JRN$ZD>Ax^SC%La3UQJZ{SjV5-ebpkyX=_ZSu1tOOF2gP+I-D(&0sQ zVuVdf;~9d-L5@6S!FkD0C~Y_mYA*i$3X#|}O>s;XGPee{m3!m6QVQpGt6COY9R`J9 zJPO}o*v2?hUQBcpBl27u;2fLk(JYo3K&;FBdthKYPoC_O^J7`h)eafW3i_2IdGey&f3BebYB zcy0Wn;^{iye%|r@le)#-o}3Vk;sR)Re}P+-gaY z0IfSfQws9T+T15&DKDFKv-I{3y*IuzC1-c(LURa4Hff+)su!vtW1IYB)9AEQgau(;H?VnJcLA~Rg}EWz}ij+49FcUQJ8O%RU?AGRoh;%9R>B` zB#4+D3^Bpra8vtXu@kRgB}iHE6-wIML3H#@`&*sJ{}8~y6Fie6kgHMq!J43UAx=eW z*!^&s;i8b`#G8SZZdmIdsPk|E<4DgXC33_t1wY(#oNbar{w;!jU5_6U2_UwkV@d*( zJ@_`nHpOOkhNatL8EB>9{+4UxBtp;y@Vi>%UUS0yb%%7p6>P6T;6SY?z@J>i!SHq@ zLlaYH)I~;8L{E}hDlqGUHYxhg_2%oaW}-+jzK3*ritK_JSuK2KL}6x!@T-zRVnN~wY(cdxMa zy#XATzgCQI6?&Uw-BX;HaJ&`zwB$Nt_)YJol*s)PJj3Lo#x7hP>XCu>^WX>XU-uYd zs^FlZlQwutDYG2p$sal(9E{XdGuJll*GKH=k`zaP3etuJMGS)9_7+c>jN~w7EiT#X zKy8HJrT$^NMulBCGy~B^D z-7U2GYw#QV@AtJRp&AK?e~^a&C&=+hK+EN8vQ+! 
zTM|@ra)VS4H_O6&yw3vhQGpqZ^S*b?-Ghl&^27GcaNSV@g#ek#T}QcVNRgGyXj0MFGWfgue(_>Tm868yjJ4WsF7^F{c;-H( z%RwX|L#;yN-{XwS$8R+lv&dGnLrW&|897b`nma=Sj7v0l2=5A9vtjh8*O_9p)1-N_ z?@HC4Bu!EI=a@_V%**9tdcNHJ_|n%6u_~bc#A*+fguIm&3}M)}^CU!QK;IN~oQYAh zD{)c#YB>G*6Z^YcufaddNpIfjkA3#uKD#gJZb)|__+WV|Nn$0yXP>M%a^g&O{ltln zO{A!X|4`Yp)c%x67?-Px#Hvs@Lpobvy|Sd4g65F9F;Ua;{GRpWzd_%-h8$T*){yaA zJ?_+rigGkok5vi%=$~JvMzmYZ4cf#?8LSIo@#X9qk>9Q_N(`L=&VO*3!-Gk^e;28t zh;k#vpZkcaS|vOB&V|*kN>?T*goA9Wt8y!sJ)1lxXZd~qxrwgJar+A_MU|$BlrP+;yGc3xOUCrrBzMkb><$4tE>_cyE+c{<+c}j#8KTfb zCeKkps)LAh*56@m1m6nmHO7_WzE?q|iU~BZWnl%bB2jN zs7he*2^9honVg5Mi~LeloZV zGhp5p%!xfK^afcgO%iP$8$JV$Rsma}Rxze%4oqe_KOaiKG+mR#H2h9d97-UeH&0wa zr|dslWiT}voYGv?+Gj1mH)S@0ZK;7RwPCMjn4NU{C$82W<QxWoMZz zqow#wuBgg>87C&SRy!SxL7MPd!X92%HG-gtQbORDK)LSMOf%{7{C}>3a-B->%eV+= zlYG0X%NI>Iu9PrLT@*~!BkfbWrnX;g5qI4qpg<&KfwOe4mB9n6Cm=g^IAJz>hxW^U|10$FMw7mw`ol7uy>H@ZT|4*yi56bO zp_W2h@m=CzKQs9etSYj$J(ys?ZD-JKdog%lx2+MaMfIX^&Htx(PBr<2S2@;jL-;Qu zS7=J`t8iYMVnI!FhB;yV8uOe_SG>!H(v7Ncvy1xou(+_2S@Yf~K~_>7v0--T--3_B zoX|@_gHqWU+w^WtN|R+u&yh~qO8~IhV40955fjh1zCt|qMx?ZMwC^6XFKLPs|K0uW zxycBIet(f^x}K!xFVaqxykd`T1*hDRb9vum{*N+Cd6rD^@f|P%GBB;NA8gZFxw$tu zGS~@c>tTZ0<>4<5#P0764a5F4Whu{5(4HQv+8+MRQ*)dReS{8@=ShiHFs-h_gQuIn zk2e#|a*e^G^=`3qxYz02;r-*hD^rSY=A^xL0d9$uG5o?zuZ*Oy4R4E8L&d~Vh_w*{aqox^l{uw9e-kdJ0 z^ke0%I5L?Q74qexMXueiglSWuzA;jz5T@d5_nO#fh0}h6eKi>6T?!2qH83rvsS-L~ zXifGP_t~r(oni*?bYTKGR5QQHC8w_z_GZk4YDMGymweL)ks+>zBX6`dMQoNswWk`$ z9EZKNyt5TpKC~*(Qd5Qqn7|Bfez&iM4lT98EA{s(<;oKrX_pl45ju{HY|)(yaPA4v zta@BOX3O9A_M`SHrRIM#XH~C$lS#AL*j$}&%)qQM?jV{i=#hE-@Ga~7$1(P4scf0@ zEl-7RsSv?ydzI_+mdD^(FCe_UhuxSgHh3T%6P<)HYk!qFXmC(ph_XYd;3Aaif}r4r zNa=4K<93idvlwCKVc$2FjImNhg!7hJy86~2Zu|m)<K)2Zdb{;-c>+rzXoxTILz* zsF1@dnOE(Ef31RC1n8TT3E=Z8RA-p9XS-M2Xq)}i@MEUIsy6UR7+~Q-nEIWd*NIWc z4yU8FFr6J@NjwV!O#(9fgIv-Zc*JFEdz#K03Hzfw-!Wu(0`Ls32=WhaZ|_vy>X1co z@d1pXo)1JpdSHF@y-s8vK(6D)1$2siyx0)^no$`)=5?!)APsgf2oFITx)A|Sm0&N^ z1>f~2+B#!-1C95WEb^>}5ll5x?n5hWEYsE`4h6ewkf0r@Ss`q)jEjw25qP;6ft2gy 
z(izjuQTI;nD1-9x5c=aO)m%b81&*lmSO0maAx}-fQFsMWPI~j|dik3;_PTE;r4Q^i zF<7os6w#2|ic>KlH8Y$aW>i1nLoarS8|sgkWEW zCadjEWDdv#O($*nKIb-Ra4cvIu4|T3X$-lT3*sg+h_#kjfKHANifq?-+S+6k;J!(r&v^zIsf*Pm`+Tv`Vn^1Mi zNY*4=28b%3{OyQqO-k@MCP*sRz=}_s9{j$L-TBw!2Qtfs3BxK-v; zS>M{Tm7@_b;k$2SFcP~*Gy``o5STI;E`MbqIIuz(q$}QiO8F#$dmY6w&LcV*}HHiA+gCg`F*fBFQ7_4mwkp<=pqzel+rHqfBKdHaHEeDN|>H4CQHF>@>gb`J4 z($+$#S8uH%En><6iIM2Sh9zltxol|;gaB86ph)Qig(e{i`IWF|3@M+Z0>B|G0C>?8 zyCYfUfP`l`GQv#;PQ+_S?fZbM89fKx6QIuUwfR-wPdgC^9>P`-S7R6QHVts;{aF67 z)FA^`vF=}gy$8M5FTCSlKYCkKS)L>va{86Vc~bSgy<2GspYt0=WxDe1SnAHXUj7;B zs|P(QqKXBJ^CS&Nz?0t~urj+dD3ZhQIceVWTiX*pZB0&PF^~c%()3MQKLKYfyT9vQ zpL26hd>0HPA{O?k*h@7zW$|Zz!-R0;az#UR_$q0C`>i zJUeS`^0Uv3_|lW#|a>RlHXmgRtZ|fjJ!L zHOXx=YI{R;QvU&w9<>keol`Up)VvT_?{q+cdEH@4eeK54Omj_qUxF~(V9JAoF8HAp zURupRF{wZLKbz@{#OR{8vTyz~g06&+$l$JVJf#6(_Qrr{5;k8hS1%ICEAJo0y5!5_ z2>kZt!(T2iGT46>b0{_D|}J~KkV4Uv@1w$6 zqjqsE5o%9~MYy1Z=zYxE6EDy&lz^(YNfQ#>{khl)cw?8r$e6fJ^FJg0e_ho787YVc z77NU*udW;Va!}ouvFiSIYjXmg^Zd8J1jPT>Hv{or!YVx7D9y{CWrE}X&;RycckO=; zATjvz41T7Bpi5-GM2~Fn-mc@z_gQ(e|M&S|BS3(66r>fOWl*LIU%N%#7?TRs+Qj{< zypB8X|LhJ#gN*@M`tfcmdalYqOMDMc4ypsZW1u=bBafEo<#j%(l$CtD~bmzwVcS?l)HQ?fP-X%U|y*P0ai?#*t9!{#~WhY(vN8hL_!k)_=y* lPf79p(f_B-;jNu#Fi!I2XSEO^A`tMSD61+{Ed>kyzW`G`Qyc&Q literal 0 HcmV?d00001 diff --git a/internal/api/server_config.go b/internal/api/server_config.go index 316cace..8840e63 100644 --- a/internal/api/server_config.go +++ b/internal/api/server_config.go @@ -1,6 +1,8 @@ package api import ( + "time" + "github.com/allaboutapps/integresql/pkg/util" "github.com/rs/zerolog" ) @@ -21,6 +23,8 @@ type EchoConfig struct { EnableRecoverMiddleware bool EnableRequestIDMiddleware bool EnableTrailingSlashMiddleware bool + EnableTimeoutMiddleware bool + RequestTimeout time.Duration } type LoggerConfig struct { @@ -38,7 +42,7 @@ func DefaultServerConfigFromEnv() ServerConfig { return ServerConfig{ Address: util.GetEnv("INTEGRESQL_ADDRESS", ""), Port: util.GetEnvAsInt("INTEGRESQL_PORT", 5000), - 
DebugEndpoints: util.GetEnvAsBool("INTEGRESQL_DEBUG_ENDPOINTS", true), // https://golang.org/pkg/net/http/pprof/ + DebugEndpoints: util.GetEnvAsBool("INTEGRESQL_DEBUG_ENDPOINTS", false), // https://golang.org/pkg/net/http/pprof/ Echo: EchoConfig{ Debug: util.GetEnvAsBool("INTEGRESQL_ECHO_DEBUG", false), EnableCORSMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_CORS_MIDDLEWARE", true), @@ -46,10 +50,15 @@ func DefaultServerConfigFromEnv() ServerConfig { EnableRecoverMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_RECOVER_MIDDLEWARE", true), EnableRequestIDMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_REQUEST_ID_MIDDLEWARE", true), EnableTrailingSlashMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_TRAILING_SLASH_MIDDLEWARE", true), + EnableTimeoutMiddleware: util.GetEnvAsBool("INTEGRESQL_ECHO_ENABLE_REQUEST_TIMEOUT_MIDDLEWARE", true), + + // typically these timeouts should be the same as INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS and INTEGRESQL_TEST_DB_GET_TIMEOUT_MS + // pkg/manager/manager_config.go + RequestTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS", 60*1000 /*1 min*/)), // affects INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS and INTEGRESQL_TEST_DB_GET_TIMEOUT_MS }, Logger: LoggerConfig{ Level: util.LogLevelFromString(util.GetEnv("INTEGRESQL_LOGGER_LEVEL", zerolog.InfoLevel.String())), - RequestLevel: util.LogLevelFromString(util.GetEnv("INTEGRESQL_LOGGER_REQUEST_LEVEL", zerolog.DebugLevel.String())), + RequestLevel: util.LogLevelFromString(util.GetEnv("INTEGRESQL_LOGGER_REQUEST_LEVEL", zerolog.InfoLevel.String())), LogRequestBody: util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_BODY", false), LogRequestHeader: util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_HEADER", false), LogRequestQuery: util.GetEnvAsBool("INTEGRESQL_LOGGER_LOG_REQUEST_QUERY", false), diff --git a/internal/api/templates/templates.go b/internal/api/templates/templates.go index a81d1db..bc10c84 100644 --- 
a/internal/api/templates/templates.go +++ b/internal/api/templates/templates.go @@ -1,11 +1,9 @@ package templates import ( - "context" "errors" "net/http" "strconv" - "time" "github.com/allaboutapps/integresql/internal/api" "github.com/allaboutapps/integresql/pkg/manager" @@ -29,10 +27,7 @@ func postInitializeTemplate(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusBadRequest, "hash is required") } - ctx, cancel := context.WithTimeout(c.Request().Context(), 30*time.Second) - defer cancel() - - template, err := s.Manager.InitializeTemplateDatabase(ctx, payload.Hash) + template, err := s.Manager.InitializeTemplateDatabase(c.Request().Context(), payload.Hash) if err != nil { if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable @@ -52,10 +47,7 @@ func putFinalizeTemplate(s *api.Server) echo.HandlerFunc { return func(c echo.Context) error { hash := c.Param("hash") - ctx, cancel := context.WithTimeout(c.Request().Context(), 10*time.Second) - defer cancel() - - if _, err := s.Manager.FinalizeTemplateDatabase(ctx, hash); err != nil { + if _, err := s.Manager.FinalizeTemplateDatabase(c.Request().Context(), hash); err != nil { if errors.Is(err, manager.ErrTemplateAlreadyInitialized) { // template is initialized, we ignore this error return c.NoContent(http.StatusNoContent) @@ -77,10 +69,7 @@ func deleteDiscardTemplate(s *api.Server) echo.HandlerFunc { return func(c echo.Context) error { hash := c.Param("hash") - ctx, cancel := context.WithTimeout(c.Request().Context(), 10*time.Second) - defer cancel() - - if err := s.Manager.DiscardTemplateDatabase(ctx, hash); err != nil { + if err := s.Manager.DiscardTemplateDatabase(c.Request().Context(), hash); err != nil { if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable } else if errors.Is(err, manager.ErrTemplateNotFound) { @@ -96,13 +85,11 @@ func deleteDiscardTemplate(s *api.Server) echo.HandlerFunc { } func getTestDatabase(s *api.Server) 
echo.HandlerFunc { + return func(c echo.Context) error { hash := c.Param("hash") - ctx, cancel := context.WithTimeout(c.Request().Context(), 1*time.Minute) - defer cancel() - - test, err := s.Manager.GetTestDatabase(ctx, hash) + test, err := s.Manager.GetTestDatabase(c.Request().Context(), hash) if err != nil { if errors.Is(err, manager.ErrManagerNotReady) { @@ -134,10 +121,7 @@ func postUnlockTestDatabase(s *api.Server) echo.HandlerFunc { return echo.NewHTTPError(http.StatusBadRequest, "invalid test database ID") } - ctx, cancel := context.WithTimeout(c.Request().Context(), 10*time.Second) - defer cancel() - - if err := s.Manager.ReturnTestDatabase(ctx, hash, id); err != nil { + if err := s.Manager.ReturnTestDatabase(c.Request().Context(), hash, id); err != nil { if errors.Is(err, manager.ErrManagerNotReady) { return echo.ErrServiceUnavailable } else if errors.Is(err, manager.ErrTemplateNotFound) { diff --git a/internal/router/router.go b/internal/router/router.go index 4fac61d..0ff08c3 100644 --- a/internal/router/router.go +++ b/internal/router/router.go @@ -61,6 +61,12 @@ func Init(s *api.Server) { log.Warn().Msg("Disabling logger middleware due to environment config") } + if s.Config.Echo.EnableTimeoutMiddleware { + s.Echo.Use(echoMiddleware.TimeoutWithConfig(echoMiddleware.TimeoutConfig{ + Timeout: s.Config.Echo.RequestTimeout, + })) + } + // enable debug endpoints only if requested if s.Config.DebugEndpoints { s.Echo.GET("/debug/*", echo.WrapHandler(http.DefaultServeMux)) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 67f13f0..554ab52 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -157,6 +157,10 @@ func (m Manager) Ready() bool { return m.db != nil } +func (m Manager) Config() ManagerConfig { + return m.config +} + func (m *Manager) Initialize(ctx context.Context) error { log := m.getManagerLogger(ctx, "Initialize") diff --git a/pkg/manager/manager_config.go b/pkg/manager/manager_config.go index d59f000..a37a13e 
100644 --- a/pkg/manager/manager_config.go +++ b/pkg/manager/manager_config.go @@ -53,8 +53,11 @@ func DefaultManagerConfigFromEnv() ManagerConfig { // we reuse the same user (PGUSER) and passwort (PGPASSWORT) for the test / template databases by default TestDatabaseOwner: util.GetEnv("INTEGRESQL_TEST_PGUSER", util.GetEnv("INTEGRESQL_PGUSER", util.GetEnv("PGUSER", "postgres"))), TestDatabaseOwnerPassword: util.GetEnv("INTEGRESQL_TEST_PGPASSWORD", util.GetEnv("INTEGRESQL_PGPASSWORD", util.GetEnv("PGPASSWORD", ""))), - TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", 5*60*1000 /*5 min*/)), - TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", 1*60*1000 /*1 min, timeout hardcoded also in GET request handler*/)), + + // typically these timeouts should be the same as INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS + // see internal/api/server_config.go + TemplateFinalizeTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS", util.GetEnvAsInt("INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS", 60*1000 /*1 min*/))), + TestDatabaseGetTimeout: time.Millisecond * time.Duration(util.GetEnvAsInt("INTEGRESQL_TEST_DB_GET_TIMEOUT_MS", util.GetEnvAsInt("INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS", 60*1000 /*1 min*/))), PoolConfig: pool.PoolConfig{ InitialPoolSize: util.GetEnvAsInt("INTEGRESQL_TEST_INITIAL_POOL_SIZE", runtime.NumCPU()), // previously default 10 From 5a5abd1c105428f09af7b87457222aaa39f23d57 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 29 Jan 2024 18:27:36 +0100 Subject: [PATCH 149/160] ref closes github issues #2 #15 #13 --- CHANGELOG.md | 3 +++ README.md | 24 ++++++++++++------------ 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dc5b42..1ac54c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,10 +45,12 @@ - Going forward, images are built via GitHub Actions and 
published to GitHub packages. - ARM Docker images - Arm64 is now supported (Apple Silicon M1/M2/M3), we publish a multi-arch image (`linux/amd64,linux/arm64`). + - Closes [#15](https://github.com/allaboutapps/integresql/issues/15) - We added the `POST /api/v1/templates/:hash/tests/:id/recreate` endpoint to the API. - You can use it to express that you no longer using this database and it can be recreated and returned to the pool. - Using this endpoint means you want to break out of our FIFO (first in, first out) recreating queue and get your test-database recreated as soon as possible. - Explicitly calling recreate is **optional** of course! + - Closes [#2](https://github.com/allaboutapps/integresql/issues/2) - Minor: Added woodpecker/drone setup (internal allaboutapps CI/CD) ### Changed @@ -60,6 +62,7 @@ - Closes [#13](https://github.com/allaboutapps/integresql/issues/13) - Logging and Debugging Improvements - Introduced zerolog for better logging in the pool and manager modules. Debug statements were refined, and unnecessary print debugging was disabled. +- Changed details around installing locally in README.md (still not recommended, use the Docker image instead), closes [#7](https://github.com/allaboutapps/integresql/issues/7) ### Environment Variables diff --git a/README.md b/README.md index f09cdd5..c41d65a 100644 --- a/README.md +++ b/README.md @@ -37,22 +37,12 @@ Do your engineers a favour by allowing them to write fast executing, parallel an ## Install -### Install using Docker (preferred) - A minimal Docker image containing a pre-built `IntegreSQL` executable is available at [Github Packages](https://github.com/allaboutapps/integresql/releases). ```bash docker pull ghcr.io/allaboutapps/integresql ``` -### Install locally - -Installing `IntegreSQL` locally requires a working [Go](https://golang.org/dl/) (1.14 or above) environment. 
Install the `IntegreSQL` executable to your Go bin folder: - -```bash -go get github.com/allaboutapps/integresql/cmd/server -``` - ## Usage ### Run using Docker (preferred) @@ -127,9 +117,19 @@ volumes: You may also refer to our [go-starter `docker-compose.yml`](https://github.com/allaboutapps/go-starter/blob/master/docker-compose.yml). -### Run locally +### Run locally (not recommended) + +Installing `IntegreSQL` locally requires a working [Go](https://golang.org/dl/) (1.14 or above) environment. Install the `IntegreSQL` executable to your Go bin folder: + +```bash +# This installs the latest version of IntegreSQL into your $GOBIN +go install github.com/allaboutapps/integresql/cmd/server@latest + +# you may want to rename the binary to integresql after installing: +mv $GOBIN/server $GOBIN/integresql +``` -Running the `IntegreSQL` server locally requires configuration via exported environment variables (see below): +Running the `IntegreSQL` server locally requires configuration via exported environment variables (see below). ```bash export INTEGRESQL_PORT=5000 From 989cb2f64c97efa26d93f61da275edde26b810dd Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 29 Jan 2024 19:15:55 +0100 Subject: [PATCH 150/160] doc env --- README.md | 79 +++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index c41d65a..c5670ca 100644 --- a/README.md +++ b/README.md @@ -4,19 +4,18 @@ Do your engineers a favour by allowing them to write fast executing, parallel and deterministic integration tests utilizing **real** PostgreSQL test databases. Resemble your live environment in tests as close as possible. 
-[![](https://img.shields.io/docker/image-size/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![](https://img.shields.io/docker/pulls/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![Docker Cloud Build Status](https://img.shields.io/docker/cloud/build/allaboutapps/integresql)](https://hub.docker.com/r/allaboutapps/integresql) [![](https://goreportcard.com/badge/github.com/allaboutapps/integresql)](https://goreportcard.com/report/github.com/allaboutapps/integresql) ![](https://github.com/allaboutapps/integresql/workflows/build/badge.svg?branch=master) +[![](https://goreportcard.com/badge/github.com/allaboutapps/integresql)](https://goreportcard.com/report/github.com/allaboutapps/integresql) ![](https://github.com/allaboutapps/integresql/workflows/build/badge.svg?branch=master) - [IntegreSQL](#integresql) - - [Integrate by client lib](#integrate-by-client-lib) - - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls) - - [Demo](#demo) - [Install](#install) - - [Install using Docker (preferred)](#install-using-docker-preferred) - - [Install locally](#install-locally) - - [Configuration](#configuration) - [Usage](#usage) - [Run using Docker (preferred)](#run-using-docker-preferred) - - [Run locally](#run-locally) + - [Run locally (not recommended)](#run-locally-not-recommended) + - [Configuration](#configuration) + - [Integrate](#integrate) + - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls) + - [Integrate by client lib](#integrate-by-client-lib) + - [Demo](#demo) - [Background](#background) - [Approach 0: Leaking database mutations for subsequent tests](#approach-0-leaking-database-mutations-for-subsequent-tests) - [Approach 1: Isolating by resetting](#approach-1-isolating-by-resetting) @@ -28,13 +27,16 @@ Do your engineers a favour by allowing them to write fast executing, parallel an - [Approach 3c benchmark 1: 
Baseline](#approach-3c-benchmark-1-baseline) - [Approach 3c benchmark 2: Small project](#approach-3c-benchmark-2-small-project) - [Final approach: IntegreSQL](#final-approach-integresql) + - [Benchmarks](#benchmarks) + - [Benchmark v1.1.0 vs v1.0.0](#benchmark-v110-vs-v100) - [Contributing](#contributing) - [Development setup](#development-setup) - [Development quickstart](#development-quickstart) - [Maintainers](#maintainers) - - [Previous maintainers](#previous-maintainers) + - [Previous maintainers](#previous-maintainers) - [License](#license) + ## Install A minimal Docker image containing a pre-built `IntegreSQL` executable is available at [Github Packages](https://github.com/allaboutapps/integresql/releases). @@ -141,25 +143,50 @@ integresql ## Configuration +> TODO ENV VARIABLES! + `IntegreSQL` requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). The following settings are available: -| Description | Environment variable | Default | Required | -| ----------------------------------------------------------------- | ------------------------------------- | -------------------- | -------- | -| IntegreSQL: listen address (defaults to all if empty) | `INTEGRESQL_ADDRESS` | `""` | | -| IntegreSQL: port | `INTEGRESQL_PORT` | `5000` | | -| PostgreSQL: host | `INTEGRESQL_PGHOST`, `PGHOST` | `"127.0.0.1"` | Yes | -| PostgreSQL: port | `INTEGRESQL_PGPORT`, `PGPORT` | `5432` | | -| PostgreSQL: username | `INTEGRESQL_PGUSER`, `PGUSER`, `USER` | `"postgres"` | Yes | -| PostgreSQL: password | `INTEGRESQL_PGPASSWORD`, `PGPASSWORD` | `""` | Yes | -| PostgreSQL: database for manager | `INTEGRESQL_PGDATABASE` | `"postgres"` | | -| PostgreSQL: template database to use | `INTEGRESQL_ROOT_TEMPLATE` | `"template0"` | | -| Managed databases: prefix | `INTEGRESQL_DB_PREFIX` | `"integresql"` | | -| Managed *template* databases: prefix `integresql_template_` | `INTEGRESQL_TEMPLATE_DB_PREFIX` | 
`"template"` | | -| Managed *test* databases: prefix `integresql_test__` | `INTEGRESQL_TEST_DB_PREFIX` | `"test"` | | -| Managed *test* databases: username | `INTEGRESQL_TEST_PGUSER` | PostgreSQL: username | | -| Managed *test* databases: password | `INTEGRESQL_TEST_PGPASSWORD` | PostgreSQL: password | | -| Managed *test* databases: minimal test pool size | `INTEGRESQL_TEST_INITIAL_POOL_SIZE` | `10` | | -| Managed *test* databases: maximal test pool size | `INTEGRESQL_TEST_MAX_POOL_SIZE` | `500` | | +| Description | Environment variable | Required | Default | +| ---------------------------------------------------------------------------------------------------- | --------------------------------------------------- | -------- | --------------------------------------------------------- | +| Server listen address (defaults to all if empty) | `INTEGRESQL_ADDRESS` | | `""` | +| Server port | `INTEGRESQL_PORT` | | `5000` | +| PostgreSQL: host | `INTEGRESQL_PGHOST`, `PGHOST` | Yes | `"127.0.0.1"` | +| PostgreSQL: port | `INTEGRESQL_PGPORT`, `PGPORT` | | `5432` | +| PostgreSQL: username | `INTEGRESQL_PGUSER`, `PGUSER`, `USER` | Yes | `"postgres"` | +| PostgreSQL: password | `INTEGRESQL_PGPASSWORD`, `PGPASSWORD` | Yes | `""` | +| PostgreSQL: database for manager | `INTEGRESQL_PGDATABASE` | | `"postgres"` | +| PostgreSQL: template database to use | `INTEGRESQL_ROOT_TEMPLATE` | | `"template0"` | +| Managed databases: prefix | `INTEGRESQL_DB_PREFIX` | | `"integresql"` | +| Managed *template* databases: prefix `integresql_template_` | `INTEGRESQL_TEMPLATE_DB_PREFIX` | | `"template"` | +| Managed *test* databases: prefix `integresql_test__` | `INTEGRESQL_TEST_DB_PREFIX` | | `"test"` | +| Managed *test* databases: username | `INTEGRESQL_TEST_PGUSER` | | PostgreSQL: username | +| Managed *test* databases: password | `INTEGRESQL_TEST_PGPASSWORD` | | PostgreSQL: password | +| Managed *test* databases: minimal test pool size | `INTEGRESQL_TEST_INITIAL_POOL_SIZE` | | 
[`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU) | +| Managed *test* databases: maximal test pool size | `INTEGRESQL_TEST_MAX_POOL_SIZE` | | [`runtime.NumCPU()*4`](https://pkg.go.dev/runtime#NumCPU) | +| Maximal number of pool tasks running in parallel | `INTEGRESQL_POOL_MAX_PARALLEL_TASKS` | | [`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU) | +| Minimal time to wait after a test db recreate has failed | `INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MIN_MS` | | `250`ms | +| The maximum possible sleep time between recreation retries | `INTEGRESQL_TEST_DB_RETRY_RECREATE_SLEEP_MAX_MS` | | `3000`ms | +| Get test-database blocks auto-recreation (FIFO) for this duration | `INTEGRESQL_TEST_DB_MINIMAL_LIFETIME_MS` | | `250`ms | +| Internal time to wait for a template-database to transition into the 'finalized' state | `INTEGRESQL_TEMPLATE_FINALIZE_TIMEOUT_MS` | | `60000`ms | +| Internal time to wait for a ready database | `INTEGRESQL_TEST_DB_GET_TIMEOUT_MS` | | `60000`ms | +| Enables [pprof debug endpoints](https://golang.org/pkg/net/http/pprof/) under `/debug/*` | `INTEGRESQL_DEBUG_ENDPOINTS` | | `false` | +| Enables [echo framework debug mode](https://echo.labstack.com/docs/customization) | `INTEGRESQL_ECHO_DEBUG` | | `false` | +| [Enables CORS](https://echo.labstack.com/docs/middleware/cors) | `INTEGRESQL_ECHO_ENABLE_CORS_MIDDLEWARE` | | `true` | +| [Enables logger](https://echo.labstack.com/docs/middleware/logger) | `INTEGRESQL_ECHO_ENABLE_LOGGER_MIDDLEWARE` | | `true` | +| [Enables recover](https://echo.labstack.com/docs/middleware/recover) | `INTEGRESQL_ECHO_ENABLE_RECOVER_MIDDLEWARE` | | `true` | +| [Sets request_id to context](https://echo.labstack.com/docs/middleware/request-id) | `INTEGRESQL_ECHO_ENABLE_REQUEST_ID_MIDDLEWARE` | | `true` | +| [Auto-adds trailing slash](https://echo.labstack.com/docs/middleware/trailing-slash) | `INTEGRESQL_ECHO_ENABLE_TRAILING_SLASH_MIDDLEWARE` | | `true` | +| [Enables timeout 
middleware](https://echo.labstack.com/docs/middleware/timeout) | `INTEGRESQL_ECHO_ENABLE_REQUEST_TIMEOUT_MIDDLEWARE` | | `true` | +| Generic timeout handling for most endpoints | `INTEGRESQL_ECHO_REQUEST_TIMEOUT_MS` | | `60000`ms | +| Show logs of [severity](https://github.com/rs/zerolog?tab=readme-ov-file#leveled-logging) | `INTEGRESQL_LOGGER_LEVEL` | | `"info"` | +| Request log [severity]([severity](https://github.com/rs/zerolog?tab=readme-ov-file#leveled-logging)) | `INTEGRESQL_LOGGER_REQUEST_LEVEL` | | `"info"` | +| Should the request-log include the body? | `INTEGRESQL_LOGGER_LOG_REQUEST_BODY` | | `false` | +| Should the request-log include headers? | `INTEGRESQL_LOGGER_LOG_REQUEST_HEADER` | | `false` | +| Should the request-log include the query? | `INTEGRESQL_LOGGER_LOG_REQUEST_QUERY` | | `false` | +| Should the request-log include the response body? | `INTEGRESQL_LOGGER_LOG_RESPONSE_BODY` | | `false` | +| Should the request-log include the response header? | `INTEGRESQL_LOGGER_LOG_RESPONSE_HEADER` | | `false` | +| Should the console logger pretty-print the log (instead of json)? 
| `INTEGRESQL_LOGGER_PRETTY_PRINT_CONSOLE` | | `false` | ## Integrate From a74c36f62433cddd8a8f68de280545ef33a467a1 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 29 Jan 2024 19:18:01 +0100 Subject: [PATCH 151/160] fix env vars doc --- CHANGELOG.md | 4 ++-- README.md | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ac54c8..b21a86f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,12 +70,12 @@ There have been quite a few additions and changes, thus we have the in-depth det #### Manager/Pool-related -- Added `INTEGRESQL_TEST_MAX_POOL_SIZE`: +- Changed `INTEGRESQL_TEST_MAX_POOL_SIZE`: - Maximal pool size that won't be exceeded - Defaults to "your number of CPU cores 4 times" [`runtime.NumCPU()*4`](https://pkg.go.dev/runtime#NumCPU) - Previous default was `500` (hardcoded) - This might be a **significant change** for some usecases, please adjust accordingly. The pooling recreation logic is now much faster, there is typically no need to have such a high limit of test-databases **per pool**! -- Added `INTEGRESQL_TEST_INITIAL_POOL_SIZE`: +- Changed `INTEGRESQL_TEST_INITIAL_POOL_SIZE`: - Initial number of ready DBs prepared in background. The pool is configured to always try to have this number of ready DBs available (it actively tries to recreate databases within the pool in a FIFO manner). - Defaults to [`runtime.NumCPU()`](https://pkg.go.dev/runtime#NumCPU) - Previous default was `10` (hardcoded) diff --git a/README.md b/README.md index c5670ca..0bc5931 100644 --- a/README.md +++ b/README.md @@ -143,8 +143,6 @@ integresql ## Configuration -> TODO ENV VARIABLES! - `IntegreSQL` requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). 
The following settings are available: | Description | Environment variable | Required | Default | From da3ae8d53be82cab4b762c0f85764e9c48a94ebe Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Mon, 29 Jan 2024 19:38:04 +0100 Subject: [PATCH 152/160] improve readme readablity --- README.md | 64 +++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 50 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 0bc5931..fd73789 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # IntegreSQL -`IntegreSQL` manages isolated PostgreSQL databases for your integration tests. +IntegreSQL manages isolated PostgreSQL databases for your integration tests. Do your engineers a favour by allowing them to write fast executing, parallel and deterministic integration tests utilizing **real** PostgreSQL test databases. Resemble your live environment in tests as close as possible. @@ -11,6 +11,8 @@ Do your engineers a favour by allowing them to write fast executing, parallel an - [Usage](#usage) - [Run using Docker (preferred)](#run-using-docker-preferred) - [Run locally (not recommended)](#run-locally-not-recommended) + - [Run within your CI/CD](#run-within-your-cicd) + - [GitHub Actions](#github-actions) - [Configuration](#configuration) - [Integrate](#integrate) - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls) @@ -39,23 +41,23 @@ Do your engineers a favour by allowing them to write fast executing, parallel an ## Install -A minimal Docker image containing a pre-built `IntegreSQL` executable is available at [Github Packages](https://github.com/allaboutapps/integresql/releases). +A minimal Docker image containing is published on GitHub Packages. See [GitHub Releases](https://github.com/allaboutapps/integresql/releases). 
```bash -docker pull ghcr.io/allaboutapps/integresql +docker pull ghcr.io/allaboutapps/integresql: ``` ## Usage ### Run using Docker (preferred) -Simply start the `IntegreSQL` [Docker](https://docs.docker.com/install/) (19.03 or above) container, provide the required environment variables and expose the server port: +Simply start a [Docker](https://docs.docker.com/install/) (19.03 or above) container, provide the required environment variables and expose the server port: ```bash -docker run -d --name integresql -e INTEGRESQL_PORT=5000 -p 5000:5000 allaboutapps/integresql +docker run -d --name integresql -e INTEGRESQL_PORT=5000 -p 5000:5000 ghcr.io/allaboutapps/integresql: ``` -`IntegreSQL` can also be included in your project via [Docker Compose](https://docs.docker.com/compose/install/) (1.25 or above): +The container can also be included in your project via [Docker Compose](https://docs.docker.com/compose/install/) (1.25 or above): ```yaml version: "3.4" @@ -81,7 +83,7 @@ services: # [...] additional main service setup integresql: - image: allaboutapps/integresql:1.0.0 + image: ghcr.io/allaboutapps/integresql: ports: - "5000:5000" depends_on: @@ -121,7 +123,7 @@ You may also refer to our [go-starter `docker-compose.yml`](https://github.com/a ### Run locally (not recommended) -Installing `IntegreSQL` locally requires a working [Go](https://golang.org/dl/) (1.14 or above) environment. Install the `IntegreSQL` executable to your Go bin folder: +Installing IntegreSQL locally requires a working [Go](https://golang.org/dl/) (1.14 or above) environment. Install the `integresql` executable to your Go bin folder: ```bash # This installs the latest version of IntegreSQL into your $GOBIN @@ -131,7 +133,7 @@ go install github.com/allaboutapps/integresql/cmd/server@latest mv $GOBIN/server $GOBIN/integresql ``` -Running the `IntegreSQL` server locally requires configuration via exported environment variables (see below). 
+Running the IntegreSQL server locally requires configuration via exported environment variables (see below). ```bash export INTEGRESQL_PORT=5000 @@ -141,9 +143,43 @@ export PGPASSWORD=testpass integresql ``` +### Run within your CI/CD + +You'll also want to use integresql within your CI/CD pipeline. We recommend using the Docker image. Simply run it next to the postgres service. + +#### GitHub Actions + +For a working sample see [allaboutapps/go-starter](https://github.com/allaboutapps/go-starter/blob/master/.github/workflows/build-test.yml). + +```yaml +jobs: + build-test: + runs-on: ubuntu-latest + services: + postgres: + image: postgres: + env: + POSTGRES_DB: "development" + POSTGRES_USER: "dbuser" + POSTGRES_PASSWORD: "dbpass" + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + integresql: + image: ghcr.io/allaboutapps/integresql: + env: + PGHOST: "postgres" + PGUSER: "dbuser" + PGPASSWORD: "dbpass" +``` + ## Configuration -`IntegreSQL` requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). The following settings are available: +IntegreSQL requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). 
The following settings are available: | Description | Environment variable | Required | Default | | ---------------------------------------------------------------------------------------------------- | --------------------------------------------------- | -------- | --------------------------------------------------------- | @@ -445,7 +481,7 @@ We realized that having the above pool logic directly within the test runner is As we switched to Go as our primary backend engineering language, we needed to rewrite the above logic anyways and decided to provide a safe and language agnostic way to utilize this testing strategy with PostgreSQL. -This is how `IntegreSQL` was born. +This is how IntegreSQL was born. ## Benchmarks @@ -535,14 +571,14 @@ Please make sure to update tests as appropriate. ### Development setup -`IntegreSQL` requires the following local setup for development: +IntegreSQL requires the following local setup for development: - [Docker CE](https://docs.docker.com/install/) (19.03 or above) - [Docker Compose](https://docs.docker.com/compose/install/) (1.25 or above) The project makes use of the [devcontainer functionality](https://code.visualstudio.com/docs/remote/containers) provided by [Visual Studio Code](https://code.visualstudio.com/) so no local installation of a Go compiler is required when using VSCode as an IDE. -Should you prefer to develop `IntegreSQL` without the Docker setup, please ensure a working [Go](https://golang.org/dl/) (1.14 or above) environment has been configured as well as a PostgreSQL instance is available (tested against version 12 or above, but *should* be compatible to lower versions) and the appropriate environment variables have been configured as described in the [Install](#install) section. 
+Should you prefer to develop IntegreSQL without the Docker setup, please ensure a working [Go](https://golang.org/dl/) (1.14 or above) environment has been configured as well as a PostgreSQL instance is available (tested against version 12 or above, but *should* be compatible to lower versions) and the appropriate environment variables have been configured as described in the [Install](#install) section. ### Development quickstart @@ -584,4 +620,4 @@ integresql ## License -[MIT](LICENSE) © 2020-2024 aaa – all about apps GmbH | Nick Müller | Mario Ranftl and the `IntegreSQL` project contributors +[MIT](LICENSE) © 2020-2024 aaa – all about apps GmbH | Nick Müller | Mario Ranftl and the IntegreSQL project contributors From f496328a2009a1d9e74b1a08665c947fb3553321 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 30 Jan 2024 15:39:15 +0100 Subject: [PATCH 153/160] docs --- README.md | 6 ++-- docs/arch-1.svg | 1 + docs/arch-2.svg | 1 + docs/arch.md | 27 ++++++++++++++ docs/arch.template.md | 70 ++++++++++++++++++++++++++++++++++++ docs/integration-1.svg | 1 + docs/integration.md | 19 ++++++++++ docs/integration.template.md | 57 +++++++++++++++++++++++++++++ 8 files changed, 179 insertions(+), 3 deletions(-) create mode 100644 docs/arch-1.svg create mode 100644 docs/arch-2.svg create mode 100644 docs/arch.md create mode 100644 docs/arch.template.md create mode 100644 docs/integration-1.svg create mode 100644 docs/integration.md create mode 100644 docs/integration.template.md diff --git a/README.md b/README.md index fd73789..222d2ca 100644 --- a/README.md +++ b/README.md @@ -238,7 +238,7 @@ You development/testing flow should look like this: * You trigger your test command. 
1..n test runners/processes start in parallel * **Once** per test runner/process: * Get migrations/fixtures files `hash` over all related database files - * `InitializeTemplate: POST /templates`: attempt to create a new PostgreSQL template database identifying though the above hash `payload: {"hash": "string"}` + * `InitializeTemplate: POST /api/v1/templates`: attempt to create a new PostgreSQL template database identified by the above hash `payload: {"hash": "string"}` * `StatusOK: 200` * Truncate * Apply all migrations @@ -417,7 +417,7 @@ This is actually the (simplified) strategy, that we have used in [allaboutapps-b Here's a quick benchmark of how this strategy typically performed back then: -``` +```bash --- -------------------------------- --- replicas switched: 50 avg=11ms min=1ms max=445ms replicas awaited: 1 prebuffer=8 avg=436ms max=436ms @@ -449,7 +449,7 @@ The cool thing about having a warm pool of replicas setup in the background, is Let's look at a sightly bigger testsuite and see how this approach may possibly scale: -``` +```bash --- ----------------------------------- --- replicas switched: 280 avg=26ms min=11ms max=447ms replicas awaited: 1 prebuffer=8 avg=417ms max=417ms diff --git a/docs/arch-1.svg b/docs/arch-1.svg new file mode 100644 index 0000000..47f51bf --- /dev/null +++ b/docs/arch-1.svg @@ -0,0 +1 @@ +ServerManagerTemplate[]templateCollectionHashPool[]poolCollectionHashPoolTestDatabasedatabaseTemplateTemplateDatabasedatabaseTestDatabaseintIDDatabasedatabaseTemplateDatabaseDatabasedatabaseDatabasestringTemplateHashConfigDatabaseConfigownshashasmanagessetsisis \ No newline at end of file diff --git a/docs/arch-2.svg b/docs/arch-2.svg new file mode 100644 index 0000000..ce33a3b --- /dev/null +++ b/docs/arch-2.svg @@ -0,0 +1 @@ +
Task EXTEND
HashPool
TestDatabase
init
GetTestDatabase()
ReturnTestDatabase()
RecreateTestDatabase()
Task CLEAN_DIRTY
generation++
retry (still in use)
ready
dirty
recreating
\ No newline at end of file diff --git a/docs/arch.md b/docs/arch.md new file mode 100644 index 0000000..e13f481 --- /dev/null +++ b/docs/arch.md @@ -0,0 +1,27 @@ + + +# IntegreSQL Architecture + +## Pool structure + +The following describes the relationship between the components of IntegreSQL. + +![diagram](./arch-1.svg) + +## TestDatabase states + +The following describes the state and transitions of a TestDatabase. + +![diagram](./arch-2.svg) \ No newline at end of file diff --git a/docs/arch.template.md b/docs/arch.template.md new file mode 100644 index 0000000..a2a8366 --- /dev/null +++ b/docs/arch.template.md @@ -0,0 +1,70 @@ + + +# IntegreSQL Architecture + +## Pool structure + +The following describes the relationship between the components of IntegreSQL. + +```mermaid +erDiagram + Server ||--o| Manager : owns + Manager { + Template[] templateCollection + HashPool[] poolCollection + } + Manager ||--o{ HashPool : has + Manager ||--o{ Template : has + Template { + TemplateDatabase database + } + HashPool { + TestDatabase database + } + HashPool ||--o{ TestDatabase : "manages" + Template ||--|| TemplateDatabase : "sets" + TestDatabase { + int ID + Database database + } + TemplateDatabase { + Database database + } + Database { + string TemplateHash + Config DatabaseConfig + } + TestDatabase o|--|| Database : "is" + TemplateDatabase o|--|| Database : "is" +``` + +## TestDatabase states + +The following describes the state and transitions of a TestDatabase. 
+ +```mermaid +stateDiagram-v2 + + HashPool --> TestDatabase: Task EXTEND + + state TestDatabase { + [*] --> ready: init + ready --> dirty: GetTestDatabase() + dirty --> ready: ReturnTestDatabase() + dirty --> recreating: RecreateTestDatabase()\nTask CLEAN_DIRTY + recreating --> ready: generation++ + recreating --> recreating: retry (still in use) + } +``` \ No newline at end of file diff --git a/docs/integration-1.svg b/docs/integration-1.svg new file mode 100644 index 0000000..6808002 --- /dev/null +++ b/docs/integration-1.svg @@ -0,0 +1 @@ +PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYouCompute a migrations/fixtures files hash over all related database filesCreate a new PostgreSQL template database identified a the same unique hash payload: {"hash": "string"}Parse the received database connection payload and connect to the template database.Finalize the template so it can be used!You can now get isolated test databases from the pool!In case you have multiple testrunners/processes and call with the same template hash againSome other process has already recreated a PostgreSQL template database for this hash (or is currently doing it), you can just consider the template ready at this point.Typically happens if IntegreSQL cannot communicate withPostgreSQL, fail the test runner processmake testInitializeTemplate: POST /api/v1/templatesCREATE DATABASE template_<hash>StatusOK: 200Truncate, apply all migrations, seed all fixtures, ..., disconnect.FinalizeTemplate: PUT /api/v1/templates/:hashStatusOK: 200InitializeTemplate: POST /api/v1/templatesStatusLocked: 423StatusServiceUnavailable: 503 \ No newline at end of file diff --git a/docs/integration.md b/docs/integration.md new file mode 100644 index 0000000..021eede --- /dev/null +++ b/docs/integration.md @@ -0,0 +1,19 @@ + + +# Integrate via REST API + +## Once per test runner/process: + +![diagram](./integration-1.svg) diff --git a/docs/integration.template.md b/docs/integration.template.md new file 
mode 100644 index 0000000..3de450a --- /dev/null +++ b/docs/integration.template.md @@ -0,0 +1,57 @@ + + +# Integrate via REST API + +## Once per test runner/process: + +```mermaid +sequenceDiagram + You->>Testrunner: make test + + Note right of Testrunner: Compute a migrations/fixtures files hash over all related database files + + + Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates + + Note over Testrunner,IntegreSQL: Create a new PostgreSQL template database
identified a the same unique hash
payload: {"hash": "string"} + + IntegreSQL->>PostgreSQL: CREATE DATABASE
template_ + PostgreSQL-->>IntegreSQL: + + IntegreSQL-->>Testrunner: StatusOK: 200 + + Note over Testrunner,PostgreSQL: Parse the received database connection payload and connect to the template database. + + Testrunner->>PostgreSQL: Truncate, apply all migrations, seed all fixtures, ..., disconnect. + PostgreSQL-->>Testrunner: + + Note over Testrunner,IntegreSQL: Finalize the template so it can be used! + + Testrunner->>IntegreSQL: FinalizeTemplate: PUT /api/v1/templates/:hash + IntegreSQL-->>Testrunner: StatusOK: 200 + + Note over Testrunner,IntegreSQL: You can now get isolated test databases from the pool! + + Note over Testrunner,IntegreSQL: In case you have multiple testrunners/processes
and call with the same template hash again + + Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates + IntegreSQL-->>Testrunner: StatusLocked: 423 + + Note over Testrunner,IntegreSQL: Some other process has already recreated
a PostgreSQL template database for this hash
(or is currently doing it), you can just consider
the template ready at this point. + + IntegreSQL-->>Testrunner: StatusServiceUnavailable: 503 + + Note over Testrunner,IntegreSQL: Typically happens if IntegreSQL cannot communicate with
PostgreSQL, fail the test runner process +``` From 8b1bba09d3bc7083ad2c03707a4a6d65d5067c4a Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 30 Jan 2024 18:24:13 +0100 Subject: [PATCH 154/160] integration diagrams --- README.md | 2 +- docs/integration-1.svg | 2 +- docs/integration-2.svg | 1 + docs/integration-3.svg | 1 + docs/integration.md | 16 +++++++++++++ docs/integration.template.md | 44 +++++++++++++++++++++++++++++------- 6 files changed, 56 insertions(+), 10 deletions(-) create mode 100644 docs/integration-2.svg create mode 100644 docs/integration-3.svg diff --git a/README.md b/README.md index 222d2ca..595e801 100644 --- a/README.md +++ b/README.md @@ -231,7 +231,7 @@ You will typically want to integrate by a client lib (see below), but you can al ### Integrate by RESTful JSON calls -You development/testing flow should look like this: +Your development/testing flow should look like this: * **Start IntegreSQL** and leave it running **in the background** (your PostgreSQL template and test database pool will always be warm) * ... 
diff --git a/docs/integration-1.svg b/docs/integration-1.svg index 6808002..e4c622f 100644 --- a/docs/integration-1.svg +++ b/docs/integration-1.svg @@ -1 +1 @@ -PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYouCompute a migrations/fixtures files hash over all related database filesCreate a new PostgreSQL template database identified a the same unique hash payload: {"hash": "string"}Parse the received database connection payload and connect to the template database.Finalize the template so it can be used!You can now get isolated test databases from the pool!In case you have multiple testrunners/processes and call with the same template hash againSome other process has already recreated a PostgreSQL template database for this hash (or is currently doing it), you can just consider the template ready at this point.Typically happens if IntegreSQL cannot communicate withPostgreSQL, fail the test runner processmake testInitializeTemplate: POST /api/v1/templatesCREATE DATABASE template_<hash>StatusOK: 200Truncate, apply all migrations, seed all fixtures, ..., disconnect.FinalizeTemplate: PUT /api/v1/templates/:hashStatusOK: 200InitializeTemplate: POST /api/v1/templatesStatusLocked: 423StatusServiceUnavailable: 503 \ No newline at end of file +PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYouCompute a hash over all related files that affect your database (migrations, fixtures, imports, etc.)Create a new PostgreSQL template database identified a the same unique hash payload: {"hash": "string"}Parse the received database connection payload and connect to the template database.Finalize the template so it can be used!You can now get isolated test databases for this hash from the pool!make testInitializeTemplate: POST /api/v1/templatesCREATE DATABASE template_<hash>StatusOK: 200Truncate, apply all migrations, seed all fixtures, ..., disconnect.FinalizeTemplate: PUT /api/v1/templates/:hashStatusOK: 200 \ No newline at end of file diff --git 
a/docs/integration-2.svg b/docs/integration-2.svg new file mode 100644 index 0000000..ae8bf64 --- /dev/null +++ b/docs/integration-2.svg @@ -0,0 +1 @@ +PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYouSubsequent testrunners or multiple processes simply call with the same template hash again.Some other testrunner / process has already recreated this PostgreSQL template database identified by this hash (or is currently doing it), you can just consider the template ready at this point.You can now get isolated test databases for this hash from the pool!make testInitializeTemplate: POST /api/v1/templatesStatusLocked: 423 \ No newline at end of file diff --git a/docs/integration-3.svg b/docs/integration-3.svg new file mode 100644 index 0000000..20d2dba --- /dev/null +++ b/docs/integration-3.svg @@ -0,0 +1 @@ +PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYouTypically happens if IntegreSQL cannot communicate withPostgreSQL, fail the test runner process in this case (e.g. exit 1).make testInitializeTemplate: POST /api/v1/templatesStatusServiceUnavailable: 503 \ No newline at end of file diff --git a/docs/integration.md b/docs/integration.md index 021eede..8fd12d7 100644 --- a/docs/integration.md +++ b/docs/integration.md @@ -14,6 +14,22 @@ Syntax, see https://mermaid.js.org/syntax/entityRelationshipDiagram.html # Integrate via REST API +First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). + +You then trigger your test command (e.g. `make test`). 1..n test runners/processes then start in parallel. + ## Once per test runner/process: +Each test runner that starts need to communicate with IntegreSQL to set 1..n template database pools. The following sections describe the flows/scenarios you need to implement. 
+ +### Testrunner creates a new template database + ![diagram](./integration-1.svg) + +### Testrunner reuses an existing template database + +![diagram](./integration-2.svg) + +### Testrunner errors while template database setup + +![diagram](./integration-3.svg) \ No newline at end of file diff --git a/docs/integration.template.md b/docs/integration.template.md index 3de450a..5d39bdb 100644 --- a/docs/integration.template.md +++ b/docs/integration.template.md @@ -14,19 +14,26 @@ Syntax, see https://mermaid.js.org/syntax/entityRelationshipDiagram.html # Integrate via REST API +First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). + +You then trigger your test command (e.g. `make test`). 1..n test runners/processes then start in parallel. + ## Once per test runner/process: +Each test runner that starts need to communicate with IntegreSQL to set 1..n template database pools. The following sections describe the flows/scenarios you need to implement. + +### Testrunner creates a new template database + ```mermaid sequenceDiagram You->>Testrunner: make test - Note right of Testrunner: Compute a migrations/fixtures files hash over all related database files + Note right of Testrunner: Compute a hash over all related
files that affect your database
(migrations, fixtures, imports, etc.) + Note over Testrunner,IntegreSQL: Create a new PostgreSQL template database
identified by the same unique hash&lt;br/&gt;
payload: {"hash": "string"} Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates - Note over Testrunner,IntegreSQL: Create a new PostgreSQL template database
identified a the same unique hash
payload: {"hash": "string"} - IntegreSQL->>PostgreSQL: CREATE DATABASE
template_ PostgreSQL-->>IntegreSQL: @@ -42,16 +49,37 @@ sequenceDiagram Testrunner->>IntegreSQL: FinalizeTemplate: PUT /api/v1/templates/:hash IntegreSQL-->>Testrunner: StatusOK: 200 - Note over Testrunner,IntegreSQL: You can now get isolated test databases from the pool! + Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! +``` + +### Testrunner reuses an existing template database + +```mermaid +sequenceDiagram + + You->>Testrunner: make test - Note over Testrunner,IntegreSQL: In case you have multiple testrunners/processes
and call with the same template hash again + Note over Testrunner,IntegreSQL: Subsequent testrunners or multiple processes
simply call with the same template hash again. Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates IntegreSQL-->>Testrunner: StatusLocked: 423 - Note over Testrunner,IntegreSQL: Some other process has already recreated
a PostgreSQL template database for this hash
(or is currently doing it), you can just consider
the template ready at this point. + Note over Testrunner,IntegreSQL: Some other testrunner / process has already recreated
this PostgreSQL template database identified by this hash
(or is currently doing it), you can just consider
the template ready at this point. - IntegreSQL-->>Testrunner: StatusServiceUnavailable: 503 + Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! - Note over Testrunner,IntegreSQL: Typically happens if IntegreSQL cannot communicate with
PostgreSQL, fail the test runner process ``` + +### Testrunner errors while template database setup + +```mermaid +sequenceDiagram + + You->>Testrunner: make test + + Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates + IntegreSQL-->>Testrunner: StatusServiceUnavailable: 503 + + Note over Testrunner,PostgreSQL: Typically happens if IntegreSQL cannot communicate with
PostgreSQL, fail the test runner process in this case (e.g. exit 1). + +``` \ No newline at end of file From d454660ae2f0573b29d99f5b883a7f315e1bb590 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 30 Jan 2024 19:07:00 +0100 Subject: [PATCH 155/160] prepare move into readme --- docs/integration-4.svg | 1 + docs/integration-5.svg | 1 + docs/integration-6.svg | 1 + docs/integration.md | 68 +++++++++++++++++-- docs/integration.template.md | 128 +++++++++++++++++++++++++++++++++-- 5 files changed, 187 insertions(+), 12 deletions(-) create mode 100644 docs/integration-4.svg create mode 100644 docs/integration-5.svg create mode 100644 docs/integration-6.svg diff --git a/docs/integration-4.svg b/docs/integration-4.svg new file mode 100644 index 0000000..a0bce99 --- /dev/null +++ b/docs/integration-4.svg @@ -0,0 +1 @@ +PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYou...Before each test, get a new isolated test database from the pool for the template hash.Blocks until the template is finalizedThe test databases for the template poolwere already created and are simply returned.Your runner now has a fully isolated PostgreSQL databasefrom our already migrated/seeded template database to use within your test.Run your test code!Your test is finished.GetTestDatabase: GET /api/v1/templates/:hash/testsStatusOK: 200Directly connect to the test database.Disconnect from the test database \ No newline at end of file diff --git a/docs/integration-5.svg b/docs/integration-5.svg new file mode 100644 index 0000000..bfaa721 --- /dev/null +++ b/docs/integration-5.svg @@ -0,0 +1 @@ +PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYou...Run your **readonly** test code!Your **readonly** test is finished. 
As you did not modify the test database, you can unlock it again(immediately available in the pool again).GetTestDatabase: GET /api/v1/templates/:hash/testsStatusOK: 200Directly connect to the test database.Disconnect from the test databaseReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock(previously and soft-deprecated DELETE /api/v1/templates/:hash/tests/:id)StatusOK: 200 \ No newline at end of file diff --git a/docs/integration-6.svg b/docs/integration-6.svg new file mode 100644 index 0000000..f67a582 --- /dev/null +++ b/docs/integration-6.svg @@ -0,0 +1 @@ +PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYou...Run your test code!Your test is finished. As you don't want to wait for FIFO autocleaning, you can manually recreate the test database.GetTestDatabase: GET /api/v1/templates/:hash/testsStatusOK: 200Directly connect to the test database.Disconnect from the test databaseRecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreateStatusOK: 200 \ No newline at end of file diff --git a/docs/integration.md b/docs/integration.md index 8fd12d7..4fd5ef0 100644 --- a/docs/integration.md +++ b/docs/integration.md @@ -14,13 +14,27 @@ Syntax, see https://mermaid.js.org/syntax/entityRelationshipDiagram.html # Integrate via REST API -First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). 
+- [Integrate via REST API](#integrate-via-rest-api) + - [Once per test runner/process](#once-per-test-runnerprocess) + - [Testrunner creates a new template database](#testrunner-creates-a-new-template-database) + - [Testrunner reuses an existing template database](#testrunner-reuses-an-existing-template-database) + - [Failure modes while template database setup: 503](#failure-modes-while-template-database-setup-503) + - [Per each test](#per-each-test) + - [New test database per test](#new-test-database-per-test) + - [Optional: Manually unlocking a test database after a readonly test](#optional-manually-unlocking-a-test-database-after-a-readonly-test) + - [Optional: Manually recreating a test database](#optional-manually-recreating-a-test-database) + - [Failure modes while getting a new test database](#failure-modes-while-getting-a-new-test-database) + - [StatusNotFound 404](#statusnotfound-404) + - [StatusGone 410](#statusgone-410) + - [StatusServiceUnavailable 503](#statusserviceunavailable-503) -You then trigger your test command (e.g. `make test`). 1..n test runners/processes then start in parallel. +First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). When you trigger your test command (e.g. `make test`), 1..n test runners/processes start in parallel. -## Once per test runner/process: +A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. -Each test runner that starts need to communicate with IntegreSQL to set 1..n template database pools. The following sections describe the flows/scenarios you need to implement. 
+## Once per test runner/process + +Each test runner starts and need to communicate with IntegreSQL to setup 1..n template database pools. The following sections describe the flows/scenarios you need to implement. ### Testrunner creates a new template database @@ -30,6 +44,48 @@ Each test runner that starts need to communicate with IntegreSQL to set 1..n tem ![diagram](./integration-2.svg) -### Testrunner errors while template database setup +### Failure modes while template database setup: 503 + +![diagram](./integration-3.svg) + +## Per each test + +### New test database per test + +Well, this is the normal flow to get a new isolated test database (prepopulated as its created from the template) for your test. + +![diagram](./integration-4.svg) + +### Optional: Manually unlocking a test database after a readonly test + +* Returns the given test DB directly to the pool, without cleaning (recreating it). +* **This is optional!** If you don't call this endpoints, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes. +* This is useful if you are sure, you did not do any changes to the database and thus want to skip the recreation process by returning it to the pool directly. + + +![diagram](./integration-5.svg) + +### Optional: Manually recreating a test database + +* Recreates the test DB according to the template and returns it back to the pool. +* **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible. +* This is useful if you have parallel testing with a mix of very long and super short tests. Our auto–FIFO recreation handling might block there. + +![diagram](./integration-6.svg) + + +### Failure modes while getting a new test database + +Some typical status codes you might encounter while getting a new test database. 
+ +#### StatusNotFound 404 + +Well, seems like someone forgot to call InitializeTemplate or it errored out. + +#### StatusGone 410 + +There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. + +#### StatusServiceUnavailable 503 -![diagram](./integration-3.svg) \ No newline at end of file +Well, typically a PostgreSQL connectivity problem \ No newline at end of file diff --git a/docs/integration.template.md b/docs/integration.template.md index 5d39bdb..c11aaef 100644 --- a/docs/integration.template.md +++ b/docs/integration.template.md @@ -14,13 +14,27 @@ Syntax, see https://mermaid.js.org/syntax/entityRelationshipDiagram.html # Integrate via REST API -First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). +- [Integrate via REST API](#integrate-via-rest-api) + - [Once per test runner/process](#once-per-test-runnerprocess) + - [Testrunner creates a new template database](#testrunner-creates-a-new-template-database) + - [Testrunner reuses an existing template database](#testrunner-reuses-an-existing-template-database) + - [Failure modes while template database setup: 503](#failure-modes-while-template-database-setup-503) + - [Per each test](#per-each-test) + - [New test database per test](#new-test-database-per-test) + - [Optional: Manually unlocking a test database after a readonly test](#optional-manually-unlocking-a-test-database-after-a-readonly-test) + - [Optional: Manually recreating a test database](#optional-manually-recreating-a-test-database) + - [Failure modes while getting a new test database](#failure-modes-while-getting-a-new-test-database) + - [StatusNotFound 404](#statusnotfound-404) + - [StatusGone 410](#statusgone-410) + - [StatusServiceUnavailable 503](#statusserviceunavailable-503) -You then trigger your test command (e.g. `make test`). 1..n test runners/processes then start in parallel. 
+First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). When you trigger your test command (e.g. `make test`), 1..n test runners/processes start in parallel. -## Once per test runner/process: +A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. -Each test runner that starts need to communicate with IntegreSQL to set 1..n template database pools. The following sections describe the flows/scenarios you need to implement. +## Once per test runner/process + +Each test runner starts and need to communicate with IntegreSQL to setup 1..n template database pools. The following sections describe the flows/scenarios you need to implement. ### Testrunner creates a new template database @@ -70,7 +84,7 @@ sequenceDiagram ``` -### Testrunner errors while template database setup +### Failure modes while template database setup: 503 ```mermaid sequenceDiagram @@ -82,4 +96,106 @@ sequenceDiagram Note over Testrunner,PostgreSQL: Typically happens if IntegreSQL cannot communicate with
PostgreSQL, fail the test runner process in this case (e.g. exit 1). -``` \ No newline at end of file +``` + +## Per each test + +### New test database per test + +Well, this is the normal flow to get a new isolated test database (prepopulated as its created from the template) for your test. + +```mermaid +sequenceDiagram + + Note right of You: ... + + Note right of Testrunner: Before each test, get a new isolated test database
from the pool for the template hash. + + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests + + Note over Testrunner,IntegreSQL: Blocks until the template is finalized + + Note right of IntegreSQL: The test databases for the template pool
were already created and are simply returned. + + IntegreSQL-->>Testrunner: StatusOK: 200 + + Note over Testrunner,PostgreSQL: Your runner now has a fully isolated PostgreSQL database
from our already migrated/seeded template database to use within your test. + + Testrunner->>PostgreSQL: Directly connect to the test database. + + Note over Testrunner,PostgreSQL: Run your test code! + + Testrunner-xPostgreSQL: Disconnect from the test database + + Note over Testrunner,PostgreSQL: Your test is finished. +``` + +### Optional: Manually unlocking a test database after a readonly test + +* Returns the given test DB directly to the pool, without cleaning (recreating it). +* **This is optional!** If you don't call this endpoints, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes. +* This is useful if you are sure, you did not do any changes to the database and thus want to skip the recreation process by returning it to the pool directly. + + +```mermaid +sequenceDiagram + + Note right of You: ... + + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests + IntegreSQL-->>Testrunner: StatusOK: 200 + + Testrunner->>PostgreSQL: Directly connect to the test database. + + Note over Testrunner,PostgreSQL: Run your **readonly** test code! + + Testrunner-xPostgreSQL: Disconnect from the test database + + Note over Testrunner,PostgreSQL: Your **readonly** test is finished.
As you did not modify the test database, you can unlock it again
(immediately available in the pool again). + + Testrunner->>IntegreSQL: ReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock
(previously and soft-deprecated DELETE /api/v1/templates/:hash/tests/:id) + IntegreSQL-->>Testrunner: StatusOK: 200 +``` + +### Optional: Manually recreating a test database + +* Recreates the test DB according to the template and returns it back to the pool. +* **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible. +* This is useful if you have parallel testing with a mix of very long and super short tests. Our auto–FIFO recreation handling might block there. + +```mermaid +sequenceDiagram + + Note right of You: ... + + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests + IntegreSQL-->>Testrunner: StatusOK: 200 + + Testrunner->>PostgreSQL: Directly connect to the test database. + + Note over Testrunner,PostgreSQL: Run your test code! + + Testrunner-xPostgreSQL: Disconnect from the test database + + Note over Testrunner,PostgreSQL: Your test is finished.
As you don't want to wait for FIFO autocleaning,
you can manually recreate the test database. + + Testrunner->>IntegreSQL: RecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreate + IntegreSQL-->>Testrunner: StatusOK: 200 +``` + + +### Failure modes while getting a new test database + +Some typical status codes you might encounter while getting a new test database. + +#### StatusNotFound 404 + +Well, seems like someone forgot to call InitializeTemplate or it errored out. + +#### StatusGone 410 + +There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. + +#### StatusServiceUnavailable 503 + +Well, typically a PostgreSQL connectivity problem \ No newline at end of file From e71d581ef95ad46cc35b1c13fa6517f19b27b7ca Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 30 Jan 2024 19:14:21 +0100 Subject: [PATCH 156/160] move diagrams into readme --- README.md | 331 ++++++++++++++++++++++++++++------- docs/arch.template.md | 37 ++-- docs/integration.template.md | 50 +++--- 3 files changed, 314 insertions(+), 104 deletions(-) diff --git a/README.md b/README.md index 595e801..7ed2789 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,11 @@ + + # IntegreSQL IntegreSQL manages isolated PostgreSQL databases for your integration tests. 
@@ -13,11 +21,26 @@ Do your engineers a favour by allowing them to write fast executing, parallel an - [Run locally (not recommended)](#run-locally-not-recommended) - [Run within your CI/CD](#run-within-your-cicd) - [GitHub Actions](#github-actions) - - [Configuration](#configuration) - [Integrate](#integrate) - - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls) - [Integrate by client lib](#integrate-by-client-lib) + - [Integrate by RESTful JSON calls](#integrate-by-restful-json-calls) + - [Once per test runner/process](#once-per-test-runnerprocess) + - [Testrunner creates a new template database](#testrunner-creates-a-new-template-database) + - [Testrunner reuses an existing template database](#testrunner-reuses-an-existing-template-database) + - [Failure modes while template database setup: 503](#failure-modes-while-template-database-setup-503) + - [Per each test](#per-each-test) + - [New test database per test](#new-test-database-per-test) + - [Optional: Manually unlocking a test database after a readonly test](#optional-manually-unlocking-a-test-database-after-a-readonly-test) + - [Optional: Manually recreating a test database](#optional-manually-recreating-a-test-database) + - [Failure modes while getting a new test database](#failure-modes-while-getting-a-new-test-database) + - [StatusNotFound 404](#statusnotfound-404) + - [StatusGone 410](#statusgone-410) + - [StatusServiceUnavailable 503](#statusserviceunavailable-503) - [Demo](#demo) + - [Configuration](#configuration) + - [Architecture](#architecture) + - [TestDatabase states](#testdatabase-states) + - [Pool structure](#pool-structure) - [Background](#background) - [Approach 0: Leaking database mutations for subsequent tests](#approach-0-leaking-database-mutations-for-subsequent-tests) - [Approach 1: Isolating by resetting](#approach-1-isolating-by-resetting) @@ -177,6 +200,201 @@ jobs: PGPASSWORD: "dbpass" ``` + +## Integrate + +IntegreSQL is a RESTful JSON API distributed as Docker 
image and go cli. It's language agnostic and manages multiple [PostgreSQL templates](https://supabase.io/blog/2020/07/09/postgresql-templates/) and their separate pool of test databases for your tests. It keeps the pool of test databases warm (as it's running in the background) and is fit for parallel test execution with multiple test runners / processes. + +You will typically want to integrate by a client lib (see below), but you can also integrate by RESTful JSON calls directly. The flow is illustrated in the follow up section. + +### Integrate by client lib + +The flow above might look intimidating at first glance, but trust us, it's simple to integrate especially if there is already an client library available for your specific language. We currently have those: + +* Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) +* Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman) +* .NET: [IntegreSQL.EF](https://github.com/mcctomsk/IntegreSql.EF) by [Artur Drobinskiy - @Shaddix](https://github.com/Shaddix) +* JavaScript/TypeScript: [@devoxa/integresql-client](https://github.com/devoxa/integresql-client) by [Devoxa - @devoxa](https://github.com/devoxa) +* ... *Add your link here and make a PR* + +### Integrate by RESTful JSON calls + +A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. + +First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). 
When you trigger your test command (e.g. `make test`), 1..n test runners/processes start in parallel. + +#### Once per test runner/process + +Each test runner starts and need to communicate with IntegreSQL to setup 1..n template database pools. The following sections describe the flows/scenarios you need to implement. + +##### Testrunner creates a new template database + +```mermaid +sequenceDiagram + You->>Testrunner: make test + + Note right of Testrunner: Compute a hash over all related
files that affect your database
(migrations, fixtures, imports, etc.) + + Note over Testrunner,IntegreSQL: Create a new PostgreSQL template database
identified by the same unique hash&lt;br/&gt;
payload: {"hash": "string"} + + Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates + + IntegreSQL->>PostgreSQL: CREATE DATABASE
template_ + PostgreSQL-->>IntegreSQL: + + IntegreSQL-->>Testrunner: StatusOK: 200 + + Note over Testrunner,PostgreSQL: Parse the received database connection payload and connect to the template database. + + Testrunner->>PostgreSQL: Truncate, apply all migrations, seed all fixtures, ..., disconnect. + PostgreSQL-->>Testrunner: + + Note over Testrunner,IntegreSQL: Finalize the template so it can be used! + + Testrunner->>IntegreSQL: FinalizeTemplate: PUT /api/v1/templates/:hash + IntegreSQL-->>Testrunner: StatusOK: 200 + + Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! +``` + +##### Testrunner reuses an existing template database + +```mermaid +sequenceDiagram + + You->>Testrunner: make test + + Note over Testrunner,IntegreSQL: Subsequent testrunners or multiple processes
simply call with the same template hash again. + + Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates + IntegreSQL-->>Testrunner: StatusLocked: 423 + + Note over Testrunner,IntegreSQL: Some other testrunner / process has already recreated
this PostgreSQL template database identified by this hash
(or is currently doing it), you can just consider
the template ready at this point. + + Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! + +``` + +##### Failure modes while template database setup: 503 + +```mermaid +sequenceDiagram + + You->>Testrunner: make test + + Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates + IntegreSQL-->>Testrunner: StatusServiceUnavailable: 503 + + Note over Testrunner,PostgreSQL: Typically happens if IntegreSQL cannot communicate with
PostgreSQL, fail the test runner process in this case (e.g. exit 1). + +``` + +#### Per each test + +##### New test database per test + +Well, this is the normal flow to get a new isolated test database (prepopulated as its created from the template) for your test. + +```mermaid +sequenceDiagram + + Note right of You: ... + + Note right of Testrunner: Before each test, get a new isolated test database
from the pool for the template hash. + + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests + + Note over Testrunner,IntegreSQL: Blocks until the template is finalized + + Note right of IntegreSQL: The test databases for the template pool
were already created and are simply returned. + + IntegreSQL-->>Testrunner: StatusOK: 200 + + Note over Testrunner,PostgreSQL: Your runner now has a fully isolated PostgreSQL database
from our already migrated/seeded template database to use within your test. + + Testrunner->>PostgreSQL: Directly connect to the test database. + + Note over Testrunner,PostgreSQL: Run your test code! + + Testrunner-xPostgreSQL: Disconnect from the test database + + Note over Testrunner,PostgreSQL: Your test is finished. +``` + +##### Optional: Manually unlocking a test database after a readonly test + +* Returns the given test DB directly to the pool, without cleaning (recreating it). +* **This is optional!** If you don't call this endpoints, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes. +* This is useful if you are sure, you did not do any changes to the database and thus want to skip the recreation process by returning it to the pool directly. + + +```mermaid +sequenceDiagram + + Note right of You: ... + + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests + IntegreSQL-->>Testrunner: StatusOK: 200 + + Testrunner->>PostgreSQL: Directly connect to the test database. + + Note over Testrunner,PostgreSQL: Run your **readonly** test code! + + Testrunner-xPostgreSQL: Disconnect from the test database + + Note over Testrunner,PostgreSQL: Your **readonly** test is finished.
As you did not modify the test database, you can unlock it again
(immediately available in the pool again). + + Testrunner->>IntegreSQL: ReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock
(previously and soft-deprecated DELETE /api/v1/templates/:hash/tests/:id) + IntegreSQL-->>Testrunner: StatusOK: 200 +``` + +##### Optional: Manually recreating a test database + +* Recreates the test DB according to the template and returns it back to the pool. +* **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible. +* This is useful if you have parallel testing with a mix of very long and super short tests. Our auto–FIFO recreation handling might block there. + +```mermaid +sequenceDiagram + + Note right of You: ... + + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests + IntegreSQL-->>Testrunner: StatusOK: 200 + + Testrunner->>PostgreSQL: Directly connect to the test database. + + Note over Testrunner,PostgreSQL: Run your test code! + + Testrunner-xPostgreSQL: Disconnect from the test database + + Note over Testrunner,PostgreSQL: Your test is finished.
As you don't want to wait for FIFO autocleaning,
you can manually recreate the test database. + + Testrunner->>IntegreSQL: RecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreate + IntegreSQL-->>Testrunner: StatusOK: 200 +``` + + +##### Failure modes while getting a new test database + +Some typical status codes you might encounter while getting a new test database. + +###### StatusNotFound 404 + +Well, seems like someone forgot to call InitializeTemplate or it errored out. + +###### StatusGone 410 + +There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. + +###### StatusServiceUnavailable 503 + +Well, typically a PostgreSQL connectivity problem + +#### Demo + +If you want to take a look on how we integrate IntegreSQL - 🤭 - please just try our [go-starter](https://github.com/allaboutapps/go-starter) project or take a look at our [test_database setup code](https://github.com/allaboutapps/go-starter/blob/master/internal/test/test_database.go). + ## Configuration IntegreSQL requires little configuration, all of which has to be provided via environment variables (due to the intended usage in a Docker environment). The following settings are available: @@ -223,73 +441,64 @@ IntegreSQL requires little configuration, all of which has to be provided via en | Should the console logger pretty-print the log (instead of json)? | `INTEGRESQL_LOGGER_PRETTY_PRINT_CONSOLE` | | `false` | -## Integrate +## Architecture -IntegreSQL is a RESTful JSON API distributed as Docker image or go cli. It's language agnostic and manages multiple [PostgreSQL templates](https://supabase.io/blog/2020/07/09/postgresql-templates/) and their separate pool of test databases for your tests. It keeps the pool of test databases warm (as it's running in the background) and is fit for parallel test execution with multiple test runners / processes. 
+### TestDatabase states -You will typically want to integrate by a client lib (see below), but you can also integrate by RESTful JSON calls directly. The flow is introducd below. +The following describes the state and transitions of a TestDatabase. -### Integrate by RESTful JSON calls - -Your development/testing flow should look like this: - -* **Start IntegreSQL** and leave it running **in the background** (your PostgreSQL template and test database pool will always be warm) -* ... -* You trigger your test command. 1..n test runners/processes start in parallel -* **Once** per test runner/process: - * Get migrations/fixtures files `hash` over all related database files - * `InitializeTemplate: POST /api/v1/templates`: attempt to create a new PostgreSQL template database identified by the above hash `payload: {"hash": "string"}` - * `StatusOK: 200` - * Truncate - * Apply all migrations - * Seed all fixtures - * `FinalizeTemplate: PUT /api/v1/templates/:hash` - * If you encountered any template setup errors call `DiscardTemplate: DELETE /api/v1/templates/:hash` - * `StatusLocked: 423` - * Some other process has already recreated a PostgreSQL template database for this `hash` (or is currently doing it), you can just consider the template ready at this point. - * `StatusServiceUnavailable: 503` - * Typically happens if IntegreSQL cannot communicate with PostgreSQL, fail the test runner process -* **Before each** test `GetTestDatabase: GET /api/v1/templates/:hash/tests` - * Blocks until the template database is finalized (via `FinalizeTemplate`) - * `StatusOK: 200` - * You get a fully isolated PostgreSQL database from our already migrated/seeded template database to use within your test - * `StatusNotFound: 404` - * Well, seems like someone forgot to call `InitializeTemplate` or it errored out. - * `StatusGone: 410` - * There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. 
- * `StatusServiceUnavailable: 503` - * Well, typically a PostgreSQL connectivity problem -* Utilizing the isolated PostgreSQL test database received from IntegreSQL for each (parallel) test: - * **Run your test code** -* **After each** test **optional**: - * `RecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreate` - * Recreates the test DB according to the template and returns it back to the pool. - * **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible. - * This is useful if you have parallel testing with a mix of very long and super short tests. Our auto–FIFO recreation handling might block there. - * `ReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock` (previously and soft-deprecated `DELETE /api/v1/templates/:hash/tests/:id`) - * Returns the given test DB directly to the pool, without cleaning (recreating it). - * **This is optional!** If you don't call this endpoints, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes. - * This is useful if you are sure, you did not do any changes to the database and thus want to skip the recreation process by returning it to the pool directly. - -* 1..n test runners end -* ... -* Subsequent 1..n test runners start/end in parallel and reuse the above logic - -A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. 
+```mermaid +stateDiagram-v2 -### Integrate by client lib + HashPool --> TestDatabase: Task EXTEND -The flow above might look intimidating at first glance, but trust us, it's simple to integrate especially if there is already an client library available for your specific language. We currently have those: + state TestDatabase { + [*] --> ready: init + ready --> dirty: GetTestDatabase() + dirty --> ready: ReturnTestDatabase() + dirty --> recreating: RecreateTestDatabase()\nTask CLEAN_DIRTY + recreating --> ready: generation++ + recreating --> recreating: retry (still in use) + } +``` -* Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) -* Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman) -* .NET: [IntegreSQL.EF](https://github.com/mcctomsk/IntegreSql.EF) by [Artur Drobinskiy - @Shaddix](https://github.com/Shaddix) -* JavaScript/TypeScript: [@devoxa/integresql-client](https://github.com/devoxa/integresql-client) by [Devoxa - @devoxa](https://github.com/devoxa) -* ... *Add your link here and make a PR* +### Pool structure + +The following describes the relationship between the components of IntegreSQL. 
+ +```mermaid +erDiagram + Server ||--o| Manager : owns + Manager { + Template[] templateCollection + HashPool[] poolCollection + } + Manager ||--o{ HashPool : has + Manager ||--o{ Template : has + Template { + TemplateDatabase database + } + HashPool { + TestDatabase database + } + HashPool ||--o{ TestDatabase : "manages" + Template ||--|| TemplateDatabase : "sets" + TestDatabase { + int ID + Database database + } + TemplateDatabase { + Database database + } + Database { + string TemplateHash + Config DatabaseConfig + } + TestDatabase o|--|| Database : "is" + TemplateDatabase o|--|| Database : "is" +``` -#### Demo -If you want to take a look on how we integrate IntegreSQL - 🤭 - please just try our [go-starter](https://github.com/allaboutapps/go-starter) project or take a look at our [test_database setup code](https://github.com/allaboutapps/go-starter/blob/master/internal/test/test_database.go). ## Background diff --git a/docs/arch.template.md b/docs/arch.template.md index a2a8366..8d96ee7 100644 --- a/docs/arch.template.md +++ b/docs/arch.template.md @@ -14,6 +14,25 @@ Syntax, see https://mermaid.js.org/syntax/entityRelationshipDiagram.html # IntegreSQL Architecture +## TestDatabase states + +The following describes the state and transitions of a TestDatabase. + +```mermaid +stateDiagram-v2 + + HashPool --> TestDatabase: Task EXTEND + + state TestDatabase { + [*] --> ready: init + ready --> dirty: GetTestDatabase() + dirty --> ready: ReturnTestDatabase() + dirty --> recreating: RecreateTestDatabase()\nTask CLEAN_DIRTY + recreating --> ready: generation++ + recreating --> recreating: retry (still in use) + } +``` + ## Pool structure The following describes the relationship between the components of IntegreSQL. @@ -50,21 +69,3 @@ erDiagram TemplateDatabase o|--|| Database : "is" ``` -## TestDatabase states - -The following describes the state and transitions of a TestDatabase. 
- -```mermaid -stateDiagram-v2 - - HashPool --> TestDatabase: Task EXTEND - - state TestDatabase { - [*] --> ready: init - ready --> dirty: GetTestDatabase() - dirty --> ready: ReturnTestDatabase() - dirty --> recreating: RecreateTestDatabase()\nTask CLEAN_DIRTY - recreating --> ready: generation++ - recreating --> recreating: retry (still in use) - } -``` \ No newline at end of file diff --git a/docs/integration.template.md b/docs/integration.template.md index c11aaef..97d1f65 100644 --- a/docs/integration.template.md +++ b/docs/integration.template.md @@ -12,31 +12,31 @@ To Export: Syntax, see https://mermaid.js.org/syntax/entityRelationshipDiagram.html --> -# Integrate via REST API +### Integrate via REST API - [Integrate via REST API](#integrate-via-rest-api) - - [Once per test runner/process](#once-per-test-runnerprocess) - - [Testrunner creates a new template database](#testrunner-creates-a-new-template-database) - - [Testrunner reuses an existing template database](#testrunner-reuses-an-existing-template-database) - - [Failure modes while template database setup: 503](#failure-modes-while-template-database-setup-503) - - [Per each test](#per-each-test) - - [New test database per test](#new-test-database-per-test) - - [Optional: Manually unlocking a test database after a readonly test](#optional-manually-unlocking-a-test-database-after-a-readonly-test) - - [Optional: Manually recreating a test database](#optional-manually-recreating-a-test-database) - - [Failure modes while getting a new test database](#failure-modes-while-getting-a-new-test-database) - - [StatusNotFound 404](#statusnotfound-404) - - [StatusGone 410](#statusgone-410) - - [StatusServiceUnavailable 503](#statusserviceunavailable-503) + - [Once per test runner/process](#once-per-test-runnerprocess) + - [Testrunner creates a new template database](#testrunner-creates-a-new-template-database) + - [Testrunner reuses an existing template database](#testrunner-reuses-an-existing-template-database) + 
- [Failure modes while template database setup: 503](#failure-modes-while-template-database-setup-503) + - [Per each test](#per-each-test) + - [New test database per test](#new-test-database-per-test) + - [Optional: Manually unlocking a test database after a readonly test](#optional-manually-unlocking-a-test-database-after-a-readonly-test) + - [Optional: Manually recreating a test database](#optional-manually-recreating-a-test-database) + - [Failure modes while getting a new test database](#failure-modes-while-getting-a-new-test-database) + - [StatusNotFound 404](#statusnotfound-404) + - [StatusGone 410](#statusgone-410) + - [StatusServiceUnavailable 503](#statusserviceunavailable-503) First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). When you trigger your test command (e.g. `make test`), 1..n test runners/processes start in parallel. A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. -## Once per test runner/process +#### Once per test runner/process Each test runner starts and need to communicate with IntegreSQL to setup 1..n template database pools. The following sections describe the flows/scenarios you need to implement. -### Testrunner creates a new template database +##### Testrunner creates a new template database ```mermaid sequenceDiagram @@ -66,7 +66,7 @@ sequenceDiagram Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! 
``` -### Testrunner reuses an existing template database +##### Testrunner reuses an existing template database ```mermaid sequenceDiagram @@ -84,7 +84,7 @@ sequenceDiagram ``` -### Failure modes while template database setup: 503 +##### Failure modes while template database setup: 503 ```mermaid sequenceDiagram @@ -98,9 +98,9 @@ sequenceDiagram ``` -## Per each test +#### Per each test -### New test database per test +##### New test database per test Well, this is the normal flow to get a new isolated test database (prepopulated as its created from the template) for your test. @@ -130,7 +130,7 @@ sequenceDiagram Note over Testrunner,PostgreSQL: Your test is finished. ``` -### Optional: Manually unlocking a test database after a readonly test +##### Optional: Manually unlocking a test database after a readonly test * Returns the given test DB directly to the pool, without cleaning (recreating it). * **This is optional!** If you don't call this endpoints, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes. @@ -157,7 +157,7 @@ sequenceDiagram IntegreSQL-->>Testrunner: StatusOK: 200 ``` -### Optional: Manually recreating a test database +##### Optional: Manually recreating a test database * Recreates the test DB according to the template and returns it back to the pool. * **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible. @@ -184,18 +184,18 @@ sequenceDiagram ``` -### Failure modes while getting a new test database +##### Failure modes while getting a new test database Some typical status codes you might encounter while getting a new test database. -#### StatusNotFound 404 +###### StatusNotFound 404 Well, seems like someone forgot to call InitializeTemplate or it errored out. 
-#### StatusGone 410 +###### StatusGone 410 There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. -#### StatusServiceUnavailable 503 +###### StatusServiceUnavailable 503 Well, typically a PostgreSQL connectivity problem \ No newline at end of file From 274e260393d06ee5568110a1cdbfb89778f6aac3 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 30 Jan 2024 19:16:02 +0100 Subject: [PATCH 157/160] rm no longer needed extra md docs --- docs/arch-1.svg | 1 - docs/arch-2.svg | 1 - docs/arch.md | 27 ----- docs/arch.template.md | 71 ------------- docs/integration-1.svg | 1 - docs/integration-2.svg | 1 - docs/integration-3.svg | 1 - docs/integration-4.svg | 1 - docs/integration-5.svg | 1 - docs/integration-6.svg | 1 - docs/integration.md | 91 ---------------- docs/integration.template.md | 201 ----------------------------------- 12 files changed, 398 deletions(-) delete mode 100644 docs/arch-1.svg delete mode 100644 docs/arch-2.svg delete mode 100644 docs/arch.md delete mode 100644 docs/arch.template.md delete mode 100644 docs/integration-1.svg delete mode 100644 docs/integration-2.svg delete mode 100644 docs/integration-3.svg delete mode 100644 docs/integration-4.svg delete mode 100644 docs/integration-5.svg delete mode 100644 docs/integration-6.svg delete mode 100644 docs/integration.md delete mode 100644 docs/integration.template.md diff --git a/docs/arch-1.svg b/docs/arch-1.svg deleted file mode 100644 index 47f51bf..0000000 --- a/docs/arch-1.svg +++ /dev/null @@ -1 +0,0 @@ -ServerManagerTemplate[]templateCollectionHashPool[]poolCollectionHashPoolTestDatabasedatabaseTemplateTemplateDatabasedatabaseTestDatabaseintIDDatabasedatabaseTemplateDatabaseDatabasedatabaseDatabasestringTemplateHashConfigDatabaseConfigownshashasmanagessetsisis \ No newline at end of file diff --git a/docs/arch-2.svg b/docs/arch-2.svg deleted file mode 100644 index ce33a3b..0000000 --- a/docs/arch-2.svg +++ /dev/null @@ -1 
+0,0 @@ -
Task EXTEND
HashPool
TestDatabase
init
GetTestDatabase()
ReturnTestDatabase()
RecreateTestDatabase()
Task CLEAN_DIRTY
generation++
retry (still in use)
ready
dirty
recreating
\ No newline at end of file diff --git a/docs/arch.md b/docs/arch.md deleted file mode 100644 index e13f481..0000000 --- a/docs/arch.md +++ /dev/null @@ -1,27 +0,0 @@ - - -# IntegreSQL Architecture - -## Pool structure - -The following describes the relationship between the components of IntegreSQL. - -![diagram](./arch-1.svg) - -## TestDatabase states - -The following describes the state and transitions of a TestDatabase. - -![diagram](./arch-2.svg) \ No newline at end of file diff --git a/docs/arch.template.md b/docs/arch.template.md deleted file mode 100644 index 8d96ee7..0000000 --- a/docs/arch.template.md +++ /dev/null @@ -1,71 +0,0 @@ - - -# IntegreSQL Architecture - -## TestDatabase states - -The following describes the state and transitions of a TestDatabase. - -```mermaid -stateDiagram-v2 - - HashPool --> TestDatabase: Task EXTEND - - state TestDatabase { - [*] --> ready: init - ready --> dirty: GetTestDatabase() - dirty --> ready: ReturnTestDatabase() - dirty --> recreating: RecreateTestDatabase()\nTask CLEAN_DIRTY - recreating --> ready: generation++ - recreating --> recreating: retry (still in use) - } -``` - -## Pool structure - -The following describes the relationship between the components of IntegreSQL. 
- -```mermaid -erDiagram - Server ||--o| Manager : owns - Manager { - Template[] templateCollection - HashPool[] poolCollection - } - Manager ||--o{ HashPool : has - Manager ||--o{ Template : has - Template { - TemplateDatabase database - } - HashPool { - TestDatabase database - } - HashPool ||--o{ TestDatabase : "manages" - Template ||--|| TemplateDatabase : "sets" - TestDatabase { - int ID - Database database - } - TemplateDatabase { - Database database - } - Database { - string TemplateHash - Config DatabaseConfig - } - TestDatabase o|--|| Database : "is" - TemplateDatabase o|--|| Database : "is" -``` - diff --git a/docs/integration-1.svg b/docs/integration-1.svg deleted file mode 100644 index e4c622f..0000000 --- a/docs/integration-1.svg +++ /dev/null @@ -1 +0,0 @@ -PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYouCompute a hash over all related files that affect your database (migrations, fixtures, imports, etc.)Create a new PostgreSQL template database identified a the same unique hash payload: {"hash": "string"}Parse the received database connection payload and connect to the template database.Finalize the template so it can be used!You can now get isolated test databases for this hash from the pool!make testInitializeTemplate: POST /api/v1/templatesCREATE DATABASE template_<hash>StatusOK: 200Truncate, apply all migrations, seed all fixtures, ..., disconnect.FinalizeTemplate: PUT /api/v1/templates/:hashStatusOK: 200 \ No newline at end of file diff --git a/docs/integration-2.svg b/docs/integration-2.svg deleted file mode 100644 index ae8bf64..0000000 --- a/docs/integration-2.svg +++ /dev/null @@ -1 +0,0 @@ -PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYouSubsequent testrunners or multiple processes simply call with the same template hash again.Some other testrunner / process has already recreated this PostgreSQL template database identified by this hash (or is currently doing it), you can just consider the template ready at 
this point.You can now get isolated test databases for this hash from the pool!make testInitializeTemplate: POST /api/v1/templatesStatusLocked: 423 \ No newline at end of file diff --git a/docs/integration-3.svg b/docs/integration-3.svg deleted file mode 100644 index 20d2dba..0000000 --- a/docs/integration-3.svg +++ /dev/null @@ -1 +0,0 @@ -PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYouTypically happens if IntegreSQL cannot communicate withPostgreSQL, fail the test runner process in this case (e.g. exit 1).make testInitializeTemplate: POST /api/v1/templatesStatusServiceUnavailable: 503 \ No newline at end of file diff --git a/docs/integration-4.svg b/docs/integration-4.svg deleted file mode 100644 index a0bce99..0000000 --- a/docs/integration-4.svg +++ /dev/null @@ -1 +0,0 @@ -PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYou...Before each test, get a new isolated test database from the pool for the template hash.Blocks until the template is finalizedThe test databases for the template poolwere already created and are simply returned.Your runner now has a fully isolated PostgreSQL databasefrom our already migrated/seeded template database to use within your test.Run your test code!Your test is finished.GetTestDatabase: GET /api/v1/templates/:hash/testsStatusOK: 200Directly connect to the test database.Disconnect from the test database \ No newline at end of file diff --git a/docs/integration-5.svg b/docs/integration-5.svg deleted file mode 100644 index bfaa721..0000000 --- a/docs/integration-5.svg +++ /dev/null @@ -1 +0,0 @@ -PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYou...Run your **readonly** test code!Your **readonly** test is finished. 
As you did not modify the test database, you can unlock it again(immediately available in the pool again).GetTestDatabase: GET /api/v1/templates/:hash/testsStatusOK: 200Directly connect to the test database.Disconnect from the test databaseReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock(previously and soft-deprecated DELETE /api/v1/templates/:hash/tests/:id)StatusOK: 200 \ No newline at end of file diff --git a/docs/integration-6.svg b/docs/integration-6.svg deleted file mode 100644 index f67a582..0000000 --- a/docs/integration-6.svg +++ /dev/null @@ -1 +0,0 @@ -PostgreSQLIntegreSQLTestrunnerYouPostgreSQLIntegreSQLTestrunnerYou...Run your test code!Your test is finished. As you don't want to wait for FIFO autocleaning, you can manually recreate the test database.GetTestDatabase: GET /api/v1/templates/:hash/testsStatusOK: 200Directly connect to the test database.Disconnect from the test databaseRecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreateStatusOK: 200 \ No newline at end of file diff --git a/docs/integration.md b/docs/integration.md deleted file mode 100644 index 4fd5ef0..0000000 --- a/docs/integration.md +++ /dev/null @@ -1,91 +0,0 @@ - - -# Integrate via REST API - -- [Integrate via REST API](#integrate-via-rest-api) - - [Once per test runner/process](#once-per-test-runnerprocess) - - [Testrunner creates a new template database](#testrunner-creates-a-new-template-database) - - [Testrunner reuses an existing template database](#testrunner-reuses-an-existing-template-database) - - [Failure modes while template database setup: 503](#failure-modes-while-template-database-setup-503) - - [Per each test](#per-each-test) - - [New test database per test](#new-test-database-per-test) - - [Optional: Manually unlocking a test database after a readonly test](#optional-manually-unlocking-a-test-database-after-a-readonly-test) - - [Optional: Manually recreating a test database](#optional-manually-recreating-a-test-database) - - [Failure 
modes while getting a new test database](#failure-modes-while-getting-a-new-test-database) - - [StatusNotFound 404](#statusnotfound-404) - - [StatusGone 410](#statusgone-410) - - [StatusServiceUnavailable 503](#statusserviceunavailable-503) - -First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). When you trigger your test command (e.g. `make test`), 1..n test runners/processes start in parallel. - -A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. - -## Once per test runner/process - -Each test runner starts and need to communicate with IntegreSQL to setup 1..n template database pools. The following sections describe the flows/scenarios you need to implement. - -### Testrunner creates a new template database - -![diagram](./integration-1.svg) - -### Testrunner reuses an existing template database - -![diagram](./integration-2.svg) - -### Failure modes while template database setup: 503 - -![diagram](./integration-3.svg) - -## Per each test - -### New test database per test - -Well, this is the normal flow to get a new isolated test database (prepopulated as its created from the template) for your test. - -![diagram](./integration-4.svg) - -### Optional: Manually unlocking a test database after a readonly test - -* Returns the given test DB directly to the pool, without cleaning (recreating it). -* **This is optional!** If you don't call this endpoints, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes. 
-* This is useful if you are sure, you did not do any changes to the database and thus want to skip the recreation process by returning it to the pool directly. - - -![diagram](./integration-5.svg) - -### Optional: Manually recreating a test database - -* Recreates the test DB according to the template and returns it back to the pool. -* **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible. -* This is useful if you have parallel testing with a mix of very long and super short tests. Our auto–FIFO recreation handling might block there. - -![diagram](./integration-6.svg) - - -### Failure modes while getting a new test database - -Some typical status codes you might encounter while getting a new test database. - -#### StatusNotFound 404 - -Well, seems like someone forgot to call InitializeTemplate or it errored out. - -#### StatusGone 410 - -There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. 
- -#### StatusServiceUnavailable 503 - -Well, typically a PostgreSQL connectivity problem \ No newline at end of file diff --git a/docs/integration.template.md b/docs/integration.template.md deleted file mode 100644 index 97d1f65..0000000 --- a/docs/integration.template.md +++ /dev/null @@ -1,201 +0,0 @@ - - -### Integrate via REST API - -- [Integrate via REST API](#integrate-via-rest-api) - - [Once per test runner/process](#once-per-test-runnerprocess) - - [Testrunner creates a new template database](#testrunner-creates-a-new-template-database) - - [Testrunner reuses an existing template database](#testrunner-reuses-an-existing-template-database) - - [Failure modes while template database setup: 503](#failure-modes-while-template-database-setup-503) - - [Per each test](#per-each-test) - - [New test database per test](#new-test-database-per-test) - - [Optional: Manually unlocking a test database after a readonly test](#optional-manually-unlocking-a-test-database-after-a-readonly-test) - - [Optional: Manually recreating a test database](#optional-manually-recreating-a-test-database) - - [Failure modes while getting a new test database](#failure-modes-while-getting-a-new-test-database) - - [StatusNotFound 404](#statusnotfound-404) - - [StatusGone 410](#statusgone-410) - - [StatusServiceUnavailable 503](#statusserviceunavailable-503) - -First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). When you trigger your test command (e.g. `make test`), 1..n test runners/processes start in parallel. - -A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. 
- -#### Once per test runner/process - -Each test runner starts and need to communicate with IntegreSQL to setup 1..n template database pools. The following sections describe the flows/scenarios you need to implement. - -##### Testrunner creates a new template database - -```mermaid -sequenceDiagram - You->>Testrunner: make test - - Note right of Testrunner: Compute a hash over all related
files that affect your database
(migrations, fixtures, imports, etc.) - - Note over Testrunner,IntegreSQL: Create a new PostgreSQL template database
identified by the same unique hash<br/>
payload: {"hash": "string"} - - Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates - - IntegreSQL->>PostgreSQL: CREATE DATABASE
template_ - PostgreSQL-->>IntegreSQL: - - IntegreSQL-->>Testrunner: StatusOK: 200 - - Note over Testrunner,PostgreSQL: Parse the received database connection payload and connect to the template database. - - Testrunner->>PostgreSQL: Truncate, apply all migrations, seed all fixtures, ..., disconnect. - PostgreSQL-->>Testrunner: - - Note over Testrunner,IntegreSQL: Finalize the template so it can be used! - - Testrunner->>IntegreSQL: FinalizeTemplate: PUT /api/v1/templates/:hash - IntegreSQL-->>Testrunner: StatusOK: 200 - - Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! -``` - -##### Testrunner reuses an existing template database - -```mermaid -sequenceDiagram - - You->>Testrunner: make test - - Note over Testrunner,IntegreSQL: Subsequent testrunners or multiple processes
simply call with the same template hash again. - - Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates - IntegreSQL-->>Testrunner: StatusLocked: 423 - - Note over Testrunner,IntegreSQL: Some other testrunner / process has already recreated
this PostgreSQL template database identified by this hash
(or is currently doing it), you can just consider
the template ready at this point. - - Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! - -``` - -##### Failure modes while template database setup: 503 - -```mermaid -sequenceDiagram - - You->>Testrunner: make test - - Testrunner->>IntegreSQL: InitializeTemplate: POST /api/v1/templates - IntegreSQL-->>Testrunner: StatusServiceUnavailable: 503 - - Note over Testrunner,PostgreSQL: Typically happens if IntegreSQL cannot communicate with
PostgreSQL, fail the test runner process in this case (e.g. exit 1). - -``` - -#### Per each test - -##### New test database per test - -Well, this is the normal flow to get a new isolated test database (prepopulated as its created from the template) for your test. - -```mermaid -sequenceDiagram - - Note right of You: ... - - Note right of Testrunner: Before each test, get a new isolated test database
from the pool for the template hash. - - Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests - - Note over Testrunner,IntegreSQL: Blocks until the template is finalized - - Note right of IntegreSQL: The test databases for the template pool
were already created and are simply returned. - - IntegreSQL-->>Testrunner: StatusOK: 200 - - Note over Testrunner,PostgreSQL: Your runner now has a fully isolated PostgreSQL database
from our already migrated/seeded template database to use within your test. - - Testrunner->>PostgreSQL: Directly connect to the test database. - - Note over Testrunner,PostgreSQL: Run your test code! - - Testrunner-xPostgreSQL: Disconnect from the test database - - Note over Testrunner,PostgreSQL: Your test is finished. -``` - -##### Optional: Manually unlocking a test database after a readonly test - -* Returns the given test DB directly to the pool, without cleaning (recreating it). -* **This is optional!** If you don't call this endpoints, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible, even though it actually had no changes. -* This is useful if you are sure, you did not do any changes to the database and thus want to skip the recreation process by returning it to the pool directly. - - -```mermaid -sequenceDiagram - - Note right of You: ... - - Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests - IntegreSQL-->>Testrunner: StatusOK: 200 - - Testrunner->>PostgreSQL: Directly connect to the test database. - - Note over Testrunner,PostgreSQL: Run your **readonly** test code! - - Testrunner-xPostgreSQL: Disconnect from the test database - - Note over Testrunner,PostgreSQL: Your **readonly** test is finished.
As you did not modify the test database, you can unlock it again
(immediately available in the pool again). - - Testrunner->>IntegreSQL: ReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock
(previously and soft-deprecated DELETE /api/v1/templates/:hash/tests/:id) - IntegreSQL-->>Testrunner: StatusOK: 200 -``` - -##### Optional: Manually recreating a test database - -* Recreates the test DB according to the template and returns it back to the pool. -* **This is optional!** If you don't call this endpoint, the test database will be recreated in a FIFO manner (first in, first out) as soon as possible. -* This is useful if you have parallel testing with a mix of very long and super short tests. Our auto–FIFO recreation handling might block there. - -```mermaid -sequenceDiagram - - Note right of You: ... - - Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests - IntegreSQL-->>Testrunner: StatusOK: 200 - - Testrunner->>PostgreSQL: Directly connect to the test database. - - Note over Testrunner,PostgreSQL: Run your test code! - - Testrunner-xPostgreSQL: Disconnect from the test database - - Note over Testrunner,PostgreSQL: Your test is finished.
As you don't want to wait for FIFO autocleaning,
you can manually recreate the test database. - - Testrunner->>IntegreSQL: RecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreate - IntegreSQL-->>Testrunner: StatusOK: 200 -``` - - -##### Failure modes while getting a new test database - -Some typical status codes you might encounter while getting a new test database. - -###### StatusNotFound 404 - -Well, seems like someone forgot to call InitializeTemplate or it errored out. - -###### StatusGone 410 - -There was an error during test setup with our fixtures, someone called `DiscardTemplate`, thus this template cannot be used. - -###### StatusServiceUnavailable 503 - -Well, typically a PostgreSQL connectivity problem \ No newline at end of file From a39cb96797037490bac4cd268c9dcf81367155c4 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 30 Jan 2024 19:37:00 +0100 Subject: [PATCH 158/160] intro diagram --- README.md | 60 +++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 7ed2789..474837b 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,32 @@ IntegreSQL manages isolated PostgreSQL databases for your integration tests. Do your engineers a favour by allowing them to write fast executing, parallel and deterministic integration tests utilizing **real** PostgreSQL test databases. Resemble your live environment in tests as close as possible. +```mermaid +sequenceDiagram + You->>Testrunner: Start tests + + Testrunner->>IntegreSQL: New template database + IntegreSQL->>PostgreSQL: + PostgreSQL-->>IntegreSQL: + IntegreSQL-->>Testrunner: + + Testrunner->>PostgreSQL: Connect to template database, apply all migrations, seed all fixtures, ..., disconnect. + PostgreSQL-->>Testrunner: + + Testrunner->>IntegreSQL: Finalize the template database + IntegreSQL-->>Testrunner: + + Note over Testrunner,PostgreSQL: Your test runner can now get isolated test databases for this hash from the pool! 
+ + loop Each test + Testrunner->>IntegreSQL: Get test database (looks like template database) + Testrunner->>PostgreSQL: + Note over Testrunner,PostgreSQL: Run your test code in an isolated test database! + + Testrunner-xPostgreSQL: Disconnect from the test database. + end +``` + [![](https://goreportcard.com/badge/github.com/allaboutapps/integresql)](https://goreportcard.com/report/github.com/allaboutapps/integresql) ![](https://github.com/allaboutapps/integresql/workflows/build/badge.svg?branch=master) - [IntegreSQL](#integresql) @@ -209,7 +235,7 @@ You will typically want to integrate by a client lib (see below), but you can al ### Integrate by client lib -The flow above might look intimidating at first glance, but trust us, it's simple to integrate especially if there is already an client library available for your specific language. We currently have those: +It's simple to integrate especially if there is already a client library available for your specific language. We currently have those: * Go: [integresql-client-go](https://github.com/allaboutapps/integresql-client-go) by [Nick Müller - @MorpheusXAUT](https://github.com/MorpheusXAUT) * Python: [integresql-client-python](https://github.com/msztolcman/integresql-client-python) by [Marcin Sztolcman - @msztolcman](https://github.com/msztolcman) @@ -221,7 +247,7 @@ The flow above might look intimidating at first glance, but trust us, it's simpl A really good starting point to write your own integresql-client for a specific language can be found [here (go code)](https://github.com/allaboutapps/integresql-client-go/blob/master/client.go) and [here (godoc)](https://pkg.go.dev/github.com/allaboutapps/integresql-client-go?tab=doc). It's just RESTful JSON after all. -First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). When you trigger your test command (e.g. 
`make test`), 1..n test runners/processes start in parallel. +First start IntegreSQL and leave it running in the background (your PostgreSQL template and test database pool will then always be warm). When you trigger your test command (e.g. `make test`), 1..n test runners/processes can start in parallel and get ready and isolated test databases from the pool (after the template database(s) was/were initialized). #### Once per test runner/process @@ -246,7 +272,7 @@ sequenceDiagram Note over Testrunner,PostgreSQL: Parse the received database connection payload and connect to the template database. - Testrunner->>PostgreSQL: Truncate, apply all migrations, seed all fixtures, ..., disconnect. + Testrunner->>PostgreSQL: Apply all migrations, seed all fixtures, ..., disconnect. PostgreSQL-->>Testrunner: Note over Testrunner,IntegreSQL: Finalize the template so it can be used! @@ -254,7 +280,12 @@ sequenceDiagram Testrunner->>IntegreSQL: FinalizeTemplate: PUT /api/v1/templates/:hash IntegreSQL-->>Testrunner: StatusOK: 200 - Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! + Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! + + loop Each test + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests + Testrunner->>PostgreSQL: + end ``` ##### Testrunner reuses an existing template database @@ -271,7 +302,12 @@ sequenceDiagram Note over Testrunner,IntegreSQL: Some other testrunner / process has already recreated
this PostgreSQL template database identified by this hash
(or is currently doing it), you can just consider
the template ready at this point. - Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! + Note over Testrunner,PostgreSQL: You can now get isolated test databases for this hash from the pool! + + loop Each test + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests + Testrunner->>PostgreSQL: + end ``` @@ -300,6 +336,8 @@ sequenceDiagram Note right of You: ... + loop Each test + Note right of Testrunner: Before each test, get a new isolated test database
from the pool for the template hash. Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests @@ -318,7 +356,9 @@ sequenceDiagram Testrunner-xPostgreSQL: Disconnect from the test database - Note over Testrunner,PostgreSQL: Your test is finished. + Note over Testrunner,PostgreSQL: Your test is finished. + + end ``` ##### Optional: Manually unlocking a test database after a readonly test @@ -333,6 +373,8 @@ sequenceDiagram Note right of You: ... + loop Each test + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests IntegreSQL-->>Testrunner: StatusOK: 200 @@ -346,6 +388,8 @@ sequenceDiagram Testrunner->>IntegreSQL: ReturnTestDatabase: POST /api/v1/templates/:hash/tests/:id/unlock
(previously and soft-deprecated DELETE /api/v1/templates/:hash/tests/:id) IntegreSQL-->>Testrunner: StatusOK: 200 + + end ``` ##### Optional: Manually recreating a test database @@ -359,6 +403,8 @@ sequenceDiagram Note right of You: ... + loop Each test + Testrunner->>IntegreSQL: GetTestDatabase: GET /api/v1/templates/:hash/tests IntegreSQL-->>Testrunner: StatusOK: 200 @@ -372,6 +418,8 @@ sequenceDiagram Testrunner->>IntegreSQL: RecreateTestDatabase: POST /api/v1/templates/:hash/tests/:id/recreate IntegreSQL-->>Testrunner: StatusOK: 200 + + end ``` From 7322909feb6d7db4ebe4a3b9ad5dd8aef22cb6fd Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 30 Jan 2024 19:40:04 +0100 Subject: [PATCH 159/160] readme typos --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 474837b..754b8bd 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ sequenceDiagram ## Install -A minimal Docker image containing is published on GitHub Packages. See [GitHub Releases](https://github.com/allaboutapps/integresql/releases). +A minimal Docker image is published on GitHub Packages. See [GitHub Releases](https://github.com/allaboutapps/integresql/releases). ```bash docker pull ghcr.io/allaboutapps/integresql: @@ -98,6 +98,9 @@ docker pull ghcr.io/allaboutapps/integresql: ## Usage +IntegreSQL is a RESTful JSON API distributed as Docker image and go cli. It's language agnostic and manages multiple [PostgreSQL templates](https://supabase.io/blog/2020/07/09/postgresql-templates/) and their separate pool of test databases for your tests. It keeps the pool of test databases warm (as it's running in the background) and is fit for parallel test execution with multiple test runners / processes. 
+ + ### Run using Docker (preferred) Simply start a [Docker](https://docs.docker.com/install/) (19.03 or above) container, provide the required environment variables and expose the server port: @@ -229,8 +232,6 @@ jobs: ## Integrate -IntegreSQL is a RESTful JSON API distributed as Docker image and go cli. It's language agnostic and manages multiple [PostgreSQL templates](https://supabase.io/blog/2020/07/09/postgresql-templates/) and their separate pool of test databases for your tests. It keeps the pool of test databases warm (as it's running in the background) and is fit for parallel test execution with multiple test runners / processes. - You will typically want to integrate by a client lib (see below), but you can also integrate by RESTful JSON calls directly. The flow is illustrated in the follow up section. ### Integrate by client lib From be36a10b411776e24fcaf069f0735ecfadebbe26 Mon Sep 17 00:00:00 2001 From: Mario Ranftl Date: Tue, 30 Jan 2024 19:43:34 +0100 Subject: [PATCH 160/160] changelog mention diagrams --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b21a86f..2ad908c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,8 @@ - Logging and Debugging Improvements - Introduced zerolog for better logging in the pool and manager modules. Debug statements were refined, and unnecessary print debugging was disabled. - Changed details around installing locally in README.md (still not recommended, use the Docker image instead), closes [#7](https://github.com/allaboutapps/integresql/issues/7) +- Fix documentation / READMEs, especially provide integration diagrams and details on the project architecture + - Closes [#5](https://github.com/allaboutapps/integresql/issues/5) ### Environment Variables