From f7a025762c93e5e7c2103564cd85f6f874e03212 Mon Sep 17 00:00:00 2001
From: Prashanth Pai
Date: Fri, 6 Jul 2018 14:02:25 +0530
Subject: [PATCH 1/7] volume-set: Validate expanded group options

The options passed to the validation function were the original group
options present in the client request. This is incorrect, and validation
used to fail because group options aren't present in xlators. With this
change, the expanded group options are passed to the validation function.

Signed-off-by: Prashanth Pai
---
 glusterd2/commands/volumes/volume-option.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/glusterd2/commands/volumes/volume-option.go b/glusterd2/commands/volumes/volume-option.go
index b264086c7..b8c25dd5c 100644
--- a/glusterd2/commands/volumes/volume-option.go
+++ b/glusterd2/commands/volumes/volume-option.go
@@ -35,7 +35,7 @@ func optionSetValidate(c transaction.TxnCtx) error {
 	// validateOptions.
 	if err := validateOptions(options, req.Advanced, req.Experimental,
 		req.Deprecated); err != nil {
-		return fmt.Errorf("Validation failed for volume option: %s", err.Error())
+		return fmt.Errorf("validation failed for volume option: %s", err.Error())
 	}

 	var volinfo volume.Volinfo
@@ -43,8 +43,8 @@ func optionSetValidate(c transaction.TxnCtx) error {
 		return err
 	}

-	if err := validateXlatorOptions(req.Options, &volinfo); err != nil {
-		return fmt.Errorf("Validation failed for volume option:: %s", err.Error())
+	if err := validateXlatorOptions(options, &volinfo); err != nil {
+		return fmt.Errorf("validation failed for volume option:: %s", err.Error())
 	}

 	for k, v := range options {

From 538c2a4e96b0400b14131e740aed496edd2d4844 Mon Sep 17 00:00:00 2001
From: Prashanth Pai
Date: Mon, 9 Jul 2018 15:09:57 +0530
Subject: [PATCH 2/7] options: Explicitly set default boolean value properly

An empty default value for a boolean option is to be treated as false.
This will be used when resetting options to their default values.

Signed-off-by: Prashanth Pai
---
 glusterd2/xlator/load.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/glusterd2/xlator/load.go b/glusterd2/xlator/load.go
index 0aceb5276..b80afa940 100644
--- a/glusterd2/xlator/load.go
+++ b/glusterd2/xlator/load.go
@@ -66,6 +66,12 @@ func structifyOption(cOpt *C.volume_option_t) *options.Option {
 	opt.SetKey = C.GoString(cOpt.setkey)
 	opt.Level = options.OptionLevel(cOpt.level)

+	// For boolean options, default value isn't set in xlator's option
+	// table as glusterfs code treats that case as false by default.
+	if opt.Type == options.OptionTypeBool && opt.DefaultValue == "" {
+		opt.DefaultValue = "off"
+	}
+
 	return &opt
 }

From ecbead3cf9738e05279e65f7e6c7351d0a25b10a Mon Sep 17 00:00:00 2001
From: Prashanth Pai
Date: Mon, 9 Jul 2018 12:32:13 +0530
Subject: [PATCH 3/7] Fix bug in validating internet addresses

...that are used in setting volume options such as this one:
server.auth.addr.*.allow 127.0.0.1

Signed-off-by: Prashanth Pai
---
 glusterd2/xlator/options/options.go | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/glusterd2/xlator/options/options.go b/glusterd2/xlator/options/options.go
index 52f069e9b..693a0d1f9 100644
--- a/glusterd2/xlator/options/options.go
+++ b/glusterd2/xlator/options/options.go
@@ -247,16 +247,17 @@ func ValidateInternetAddress(o *Option, val string) error {
 	if len(val) == 0 {
 		return ErrInvalidArg
 	}
-	if !(validate.IsHost(val)) {
-		return ErrInvalidArg
-	} else if !(validate.IsIP(val)) {
-		return ErrInvalidArg
-	} else if !(validate.IsCIDR(val)) {
-		return ErrInvalidArg
-	} else if !(strings.ContainsAny(val, "* & # & ? & ^")) {
-		return ErrInvalidArg
+	if validate.IsHost(val) {
+		return nil
+	} else if validate.IsIP(val) {
+		return nil
+	} else if validate.IsCIDR(val) {
+		return nil
+	} else if strings.ContainsAny(val, "* & # & ? & ^") {
+		return nil
 	}
-	return nil
+
+	return ErrInvalidArg
 }

 // ValidateInternetAddressList validates the Internet Address List

From c2ff1c6164aa7f1f2ee84cde1de111d61084cd8b Mon Sep 17 00:00:00 2001
From: Prashanth Pai
Date: Fri, 6 Jul 2018 14:05:17 +0530
Subject: [PATCH 4/7] Add support for xlator options containing dots

Volume set/reset option keys can be in one of these forms:

  <graph>.<xlator>.<name>
  <xlator>.<name>

As xlator option names such as `transport.socket.ssl-enabled` can
themselves contain dots, it was hard for the options.SplitKey() function
to figure out which component is the graph and which is the xlator. This
change ensures that the graph and xlator components are properly split
for xlator options that contain dots.

Signed-off-by: Prashanth Pai
---
 glusterd2/xlator/options/utils.go | 43 ++++++++++++++++++++++++-------
 1 file changed, 34 insertions(+), 9 deletions(-)

diff --git a/glusterd2/xlator/options/utils.go b/glusterd2/xlator/options/utils.go
index 47fff77f8..2e54e028e 100644
--- a/glusterd2/xlator/options/utils.go
+++ b/glusterd2/xlator/options/utils.go
@@ -3,26 +3,51 @@ package options
 import (
 	"fmt"
 	"strings"
+
+	"github.com/gluster/glusterd2/pkg/utils"
 )

 // InvalidKeyError is returned by SplitKey if key is not of the correct form.
 type InvalidKeyError string

 func (e InvalidKeyError) Error() string {
-	return fmt.Sprintf("option key not in <graph>.<xlator>.<name> form: %s", string(e))
+	return fmt.Sprintf("option key not in [<graph>].<xlator>.<name> form: %s", string(e))
+}
+
+var validGraphs = [...]string{
+	"brick",
+	"fuse",
+	"gfproxy",
+	"nfs",
 }

 // SplitKey returns three strings by breaking key of the form
 // [<graph>].<xlator>.<name> into its constituents. <graph> is optional.
 // Returns an InvalidKeyError if key is not of correct form.
 func SplitKey(k string) (string, string, string, error) {
+	var graph, xlator, optName string
+
 	tmp := strings.Split(strings.TrimSpace(k), ".")
-	switch len(tmp) {
-	case 2:
-		return "", tmp[0], tmp[1], nil
-	case 3:
-		return tmp[0], tmp[1], tmp[2], nil
-	default:
-		return "", "", "", InvalidKeyError(k)
+	if len(tmp) < 2 {
+		// must at least be of the form <xlator>.<name>
+		return graph, xlator, optName, InvalidKeyError(k)
 	}
+
+	if utils.StringInSlice(tmp[0], validGraphs[:]) {
+		// valid graph present
+		if len(tmp) < 3 {
+			// must be of the form <graph>.<xlator>.<name>
+			return graph, xlator, optName, InvalidKeyError(k)
+		}
+		graph = tmp[0]
+		xlator = tmp[1]
+		optName = strings.Join(tmp[2:], ".")
+	} else {
+		// key is of the format <xlator>.<name> where <name> itself
+		// may contain dots. For example: transport.socket.ssl-enabled
+		xlator = tmp[0]
+		optName = k[len(xlator)+1:]
+	}
+
+	return graph, xlator, optName, nil
 }

From a28efc668564f41b9d065584c3fe9425e4ef33cc Mon Sep 17 00:00:00 2001
From: Prashanth Pai
Date: Fri, 6 Jul 2018 14:14:15 +0530
Subject: [PATCH 5/7] Add TLS support for client-bricks communication

This change enables setting up TLS for client to bricks communication.
Unlike glusterd1, which requires two separate options (`client.ssl` and
`server.ssl`) to be turned on, turning TLS on for a volume in glusterd2
is a single volume set operation. TLS can be enabled for a volume as
follows:

  $ glustercli volume set --advanced <volname> tls on

To achieve this, when xlator shared objects are loaded during startup,
options present in the transport layer (socket.so and rdma.so) are
injected into the list of options loaded from the protocol layer
(server.so and client.so). The client and brick volfiles expect the
transport options to be in the client and server xlator sections.

Signed-off-by: Prashanth Pai
---
 glusterd2/commands/volumes/grouped-options.go | 6 ++++
 glusterd2/xlator/global.go | 35 +++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/glusterd2/commands/volumes/grouped-options.go b/glusterd2/commands/volumes/grouped-options.go
index 1928f5698..957574fbb 100644
--- a/glusterd2/commands/volumes/grouped-options.go
+++ b/glusterd2/commands/volumes/grouped-options.go
@@ -129,6 +129,12 @@ var defaultGroupOptions = map[string]*api.OptionGroup{
 		{Name: "features.shard", OnValue: "on"},
 		{Name: "user.cifs", OnValue: "off"}},
 		"Enable this profile, if the Gluster Volume is used to store virtual machines"},
+	"tls": {"tls",
+		[]api.VolumeOption{
+			{Name: "server.transport.socket.ssl-enabled", OnValue: "on"},
+			{Name: "client.transport.socket.ssl-enabled", OnValue: "on"},
+		},
+		"Enable TLS for the volume for both bricks and clients"},
 	"profile.test": {"profile.test",
 		[]api.VolumeOption{{Name: "afr.eager-lock", OnValue: "on"},
 			{Name: "gfproxy.afr.eager-lock", OnValue: "on"}},
diff --git a/glusterd2/xlator/global.go b/glusterd2/xlator/global.go
index dfe2bcb35..2436b2531 100644
--- a/glusterd2/xlator/global.go
+++ b/glusterd2/xlator/global.go
@@ -35,6 +35,7 @@ func Load() (err error) {
 	}
 	xlMap = xls

+	injectTransportOptions()
 	loadOptions()
 	return
 }
@@ -57,3 +58,37 @@ func loadOptions() {
 		}
 	}
 }
+
+// injectTransportOptions injects options present in transport layer (socket.so
+// and rdma.so) into list of options loaded from protocol layer (server.so and
+// client.so)
+func injectTransportOptions() {
+
+	var transportNames = [...]string{"socket", "rdma"}
+	transports := make([]*Xlator, 0, 2)
+	for _, name := range transportNames {
+		if xl, ok := xlMap[name]; ok {
+			transports = append(transports, xl)
+		}
+	}
+
+	if len(transports) == 0 {
+		panic("socket.so or rdma.so not found. 
Please install glusterfs-server package") + } + + for _, transport := range transports { + for _, option := range transport.Options { + // TODO: + // remove this once proper settable flags are set for + // these transport options in glusterfs source + option.Flags = option.Flags | options.OptionFlagSettable + if xl, ok := xlMap["server"]; ok { + xl.Options = append(xl.Options, option) + } + if xl, ok := xlMap["client"]; ok { + option.Flags = option.Flags | options.OptionFlagClientOpt + xl.Options = append(xl.Options, option) + } + } + } +} From febde46f01bb402a384ad1abf75e5cc62aafdfa8 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 12 Jul 2018 14:52:11 +0530 Subject: [PATCH 6/7] code clean up in the glusterd2 plugins Added logging with the log.WithError(). Allocate capacity to slice to reduce the allocation of memory during append. Added UNDO function in txn steps. Signed-off-by: Madhu Rajanna --- plugins/dht/validate.go | 10 +--- plugins/georeplication/rest.go | 79 ++++++++++++-------------- plugins/georeplication/store-utils.go | 14 ++--- plugins/georeplication/transactions.go | 31 +++++----- plugins/glustershd/rest.go | 6 +- 5 files changed, 60 insertions(+), 80 deletions(-) diff --git a/plugins/dht/validate.go b/plugins/dht/validate.go index 40a05f7bb..fc0f21969 100644 --- a/plugins/dht/validate.go +++ b/plugins/dht/validate.go @@ -10,17 +10,13 @@ import ( var names = [...]string{"distribute", "dht"} -func validateOptions(v *volume.Volinfo, key string, value string) error { - var err error +func validateOptions(v *volume.Volinfo, key, value string) error { if strings.Contains(key, "readdirplus-for-dir") { if value == "on" { - val, exists := v.Options["features.cache-invalidation"] - if exists && val == "on" { + if v, ok := v.Options["features.cache-invalidation"]; ok && v == "on" { return nil } - err = fmt.Errorf("Enable \"features.cache-invalidation\" before enabling %s", - key) - return err + return fmt.Errorf("Enable \"features.cache-invalidation\" before enabling %s", key) } } return nil diff --git a/plugins/georeplication/rest.go b/plugins/georeplication/rest.go index 24cb2f6b4..e891984de 100644 --- a/plugins/georeplication/rest.go +++ b/plugins/georeplication/rest.go @@ -25,7 +25,7 @@ import ( ) // newGeorepSession creates new instance of GeorepSession -func newGeorepSession(mastervolid uuid.UUID, remotevolid uuid.UUID, req georepapi.GeorepCreateReq) *georepapi.GeorepSession { +func newGeorepSession(mastervolid, remotevolid uuid.UUID, req georepapi.GeorepCreateReq) *georepapi.GeorepSession { remoteUser := req.RemoteUser if req.RemoteUser == "" { remoteUser = "root" @@ -49,7 +49,7 @@ func newGeorepSession(mastervolid uuid.UUID, remotevolid uuid.UUID, req georepap } } -func validateMasterAndRemoteIDFormat(ctx context.Context, w http.ResponseWriter, masteridRaw string, remoteidRaw string) (uuid.UUID, uuid.UUID, error) { +func validateMasterAndRemoteIDFormat(ctx context.Context, w http.ResponseWriter, masteridRaw, remoteidRaw string) (uuid.UUID, uuid.UUID, error) { // Validate UUID format of Master and Remote Volume ID masterid := uuid.Parse(masteridRaw) if masterid == nil { @@ -163,6 +163,9 @@ func georepCreateHandler(w http.ResponseWriter, r *http.Request) { geoSession.Options["gluster-logdir"] = path.Join(config.GetString("logdir"), "glusterfs") } + //store volinfo to revert back changes in case of transaction failure + oldvolinfo := vol + // Required Volume Options vol.Options["marker.xtime"] = "on" vol.Options["marker.gsync-force-xtime"] = "on" @@ -171,11 +174,19 @@ func 
georepCreateHandler(w http.ResponseWriter, r *http.Request) { // Workaround till {{ volume.id }} added to the marker options table vol.Options["marker.volume-uuid"] = vol.ID.String() + //save volume information for transaction failure scenario + if err := txn.Ctx.Set("oldvolinfo", oldvolinfo); err != nil { + logger.WithError(err).Error("failed to set oldvolinfo in transaction context") + restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err) + return + } + txn.Nodes = vol.Nodes() txn.Steps = []*transaction.Step{ { - DoFunc: "vol-option.UpdateVolinfo", - Nodes: []uuid.UUID{gdctx.MyUUID}, + DoFunc: "vol-option.UpdateVolinfo", + UndoFunc: "vol-option.UpdateVolinfo.Undo", + Nodes: []uuid.UUID{gdctx.MyUUID}, }, { DoFunc: "vol-option.NotifyVolfileChange", @@ -198,10 +209,8 @@ func georepCreateHandler(w http.ResponseWriter, r *http.Request) { return } - err = txn.Do() - if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), + if err = txn.Do(); err != nil { + logger.WithError(err).WithFields(log.Fields{ "mastervolid": masterid, "remotevolid": remoteid, }).Error("failed to create geo-replication session") @@ -334,8 +343,7 @@ func georepActionHandler(w http.ResponseWriter, r *http.Request, action actionTy err = txn.Do() if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), + logger.WithError(err).WithFields(log.Fields{ "mastervolid": masterid, "remotevolid": remoteid, }).Error("failed to " + action.String() + " geo-replication session") @@ -436,8 +444,7 @@ func georepDeleteHandler(w http.ResponseWriter, r *http.Request) { err = txn.Do() if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), + logger.WithError(err).WithFields(log.Fields{ "mastervolid": masterid, "remotevolid": remoteid, }).Error("failed to delete geo-replication session") @@ -516,9 +523,7 @@ func georepStatusHandler(w http.ResponseWriter, r *http.Request) { err = txn.Do() if err != nil { // TODO: Handle partial failure if a few glusterd's down - - logger.WithFields(log.Fields{ - "error": err.Error(), + logger.WithError(err).WithFields(log.Fields{ "mastervolid": masterid, "remotevolid": remoteid, }).Error("failed to get status of geo-replication session") @@ -530,12 +535,16 @@ func georepStatusHandler(w http.ResponseWriter, r *http.Request) { result, err := aggregateGsyncdStatus(txn.Ctx, txn.Nodes) if err != nil { errMsg := "Failed to aggregate gsyncd status results from multiple nodes." - logger.WithField("error", err.Error()).Error("gsyncdStatusHandler:" + errMsg) + logger.WithError(err).Error("gsyncdStatusHandler:" + errMsg) restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, errMsg) return } - for _, b := range vol.GetBricks() { + bricks := vol.GetBricks() + geoSession.Workers = make([]georepapi.GeorepWorker, 0, len(bricks)) + + for _, b := range bricks { + // Set default values to all status fields, If a node or worker is down and // status not available these default values will be sent back in response geoSession.Workers = append(geoSession.Workers, georepapi.GeorepWorker{ @@ -639,8 +648,7 @@ func georepConfigGetHandler(w http.ResponseWriter, r *http.Request) { } out, err := utils.ExecuteCommandOutput(gsyncdCommand, args...) 
if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), + logger.WithError(err).WithFields(log.Fields{ "mastervolid": masterid, "remotevolid": remoteid, }).Error("failed to get session configurations") @@ -650,8 +658,7 @@ func georepConfigGetHandler(w http.ResponseWriter, r *http.Request) { var opts []georepapi.GeorepOption if err = json.Unmarshal(out, &opts); err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), + logger.WithError(err).WithFields(log.Fields{ "mastervolid": masterid, "remotevolid": remoteid, }).Error("failed to parse configurations") @@ -725,8 +732,7 @@ func georepConfigSetHandler(w http.ResponseWriter, r *http.Request) { val, ok := geoSession.Options[k] if (ok && v != val) || !ok { configWillChange = true - err = checkConfig(k, v) - if err != nil { + if err = checkConfig(k, v); err != nil { restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "Invalid Config Name/Value") return } @@ -803,8 +809,7 @@ func georepConfigSetHandler(w http.ResponseWriter, r *http.Request) { err = txn.Do() if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), + logger.WithError(err).WithFields(log.Fields{ "mastervolid": masterid, "remotevolid": remoteid, }).Error("failed to update geo-replication session config") @@ -812,7 +817,7 @@ func georepConfigSetHandler(w http.ResponseWriter, r *http.Request) { return } - var allopts []string + var allopts = make([]string, 0, len(req)) for k, v := range req { allopts = append(allopts, k+"="+v) } @@ -863,10 +868,8 @@ func georepConfigResetHandler(w http.ResponseWriter, r *http.Request) { restartRequired := false // Check if config exists, reset can be done only if it is set before for _, k := range req { - _, ok := geoSession.Options[k] - if ok { + if _, ok := geoSession.Options[k]; ok { configWillChange = true - restartRequired = restartRequiredOnConfigChange(k) } } @@ -939,8 +942,7 @@ func georepConfigResetHandler(w http.ResponseWriter, r *http.Request) { err = txn.Do() if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), + logger.WithError(err).WithFields(log.Fields{ "mastervolid": masterid, "remotevolid": remoteid, }).Error("failed to update geo-replication session config") @@ -1007,10 +1009,7 @@ func georepSSHKeyGenerateHandler(w http.ResponseWriter, r *http.Request) { err = txn.Do() if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), - "volname": volname, - }).Error("failed to generate SSH Keys") + logger.WithError(err).WithField("volname", volname).Error("failed to generate SSH Keys") restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err) return } @@ -1034,10 +1033,7 @@ func georepSSHKeyGetHandler(w http.ResponseWriter, r *http.Request) { sshkeys, err := getSSHPublicKeys(volname) if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), - "volname": volname, - }).Error("failed to get SSH public Keys") + logger.WithError(err).WithField("volname", volname).Error("failed to get SSH public Keys") restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err) return } @@ -1101,10 +1097,7 @@ func georepSSHKeyPushHandler(w http.ResponseWriter, r *http.Request) { err = txn.Do() if err != nil { - logger.WithFields(log.Fields{ - "error": err.Error(), - "volname": volname, - }).Error("failed to push SSH Keys") + logger.WithError(err).WithField("volname", volname).Error("failed to push SSH Keys") restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err) return } diff --git a/plugins/georeplication/store-utils.go 
b/plugins/georeplication/store-utils.go index e4206ff3b..8b1792721 100644 --- a/plugins/georeplication/store-utils.go +++ b/plugins/georeplication/store-utils.go @@ -42,7 +42,7 @@ func getSession(masterid string, remoteid string) (*georepapi.GeorepSession, err func addOrUpdateSession(v *georepapi.GeorepSession) error { json, e := json.Marshal(v) if e != nil { - log.WithField("error", e).Error("Failed to marshal the Info object") + log.WithError(e).Error("Failed to marshal the Info object") return e } @@ -77,10 +77,7 @@ func getSessionList() ([]*georepapi.GeorepSession, error) { var session georepapi.GeorepSession if err := json.Unmarshal(kv.Value, &session); err != nil { - log.WithFields(log.Fields{ - "session": string(kv.Key), - "error": err, - }).Error("Failed to unmarshal Geo-replication session") + log.WithError(err).WithField("session", string(kv.Key)).Error("Failed to unmarshal Geo-replication session") continue } @@ -94,7 +91,7 @@ func getSessionList() ([]*georepapi.GeorepSession, error) { func addOrUpdateSSHKey(volname string, sshkey georepapi.GeorepSSHPublicKey) error { json, e := json.Marshal(sshkey) if e != nil { - log.WithField("error", e).Error("Failed to marshal the sshkeys object") + log.WithError(e).Error("Failed to marshal the sshkeys object") return e } @@ -110,10 +107,7 @@ func addOrUpdateSSHKey(volname string, sshkey georepapi.GeorepSSHPublicKey) erro func getSSHPublicKeys(volname string) ([]georepapi.GeorepSSHPublicKey, error) { resp, e := store.Get(context.TODO(), georepSSHKeysPrefix+volname, clientv3.WithPrefix()) if e != nil { - log.WithFields(log.Fields{ - "volname": volname, - "error": e, - }).Error("Couldn't retrive SSH Key from the node") + log.WithError(e).WithField("volname", volname).Error("Couldn't retrive SSH Key from the node") return nil, e } diff --git a/plugins/georeplication/transactions.go b/plugins/georeplication/transactions.go index 6cb37bf81..4cbc615d5 100644 --- a/plugins/georeplication/transactions.go +++ b/plugins/georeplication/transactions.go @@ -255,7 +255,7 @@ func configFileGenerate(session *georepapi.GeorepSession) error { } // Remote host and UUID details - var remote []string + var remote = make([]string, 0, len(session.RemoteHosts)) for _, sh := range session.RemoteHosts { remote = append(remote, sh.PeerID.String()+":"+sh.Hostname) } @@ -264,8 +264,9 @@ func configFileGenerate(session *georepapi.GeorepSession) error { ) // Master Bricks details - var master []string - for _, b := range vol.GetBricks() { + bricks := vol.GetBricks() + var master = make([]string, 0, len(bricks)) + for _, b := range bricks { master = append(master, b.PeerID.String()+":"+b.Hostname+":"+b.Path) } confdata = append(confdata, @@ -322,19 +323,18 @@ func txnGeorepConfigFilegen(c transaction.TxnCtx) error { } if restartRequired { - err = gsyncdAction(c, actionStop) - if err != nil { + + if err = gsyncdAction(c, actionStop); err != nil { return err } - err = gsyncdAction(c, actionStart) - if err != nil { + + if err = gsyncdAction(c, actionStart); err != nil { return err } } else { // Restart not required, Generate config file Gsynd will reload // automatically if running - err = configFileGenerate(&session) - if err != nil { + if err = configFileGenerate(&session); err != nil { return err } } @@ -362,8 +362,8 @@ func txnSSHKeysGenerate(c transaction.TxnCtx) error { ) // Create Directory if not exists - err = os.MkdirAll(path.Dir(secretPemFile), os.ModeDir|os.ModePerm) - if err != nil { + + if err = os.MkdirAll(path.Dir(secretPemFile), os.ModeDir|os.ModePerm); err 
!= nil { return err } @@ -392,17 +392,14 @@ func txnSSHKeysGenerate(c transaction.TxnCtx) error { return err } } - data, err = ioutil.ReadFile(tarSSHPemFile + ".pub") - if err != nil { + if data, err = ioutil.ReadFile(tarSSHPemFile + ".pub"); err != nil { return err } sshkey.TarKey = string(data) err = addOrUpdateSSHKey(volname, sshkey) - if err != nil { - return err - } - return nil + + return err } func txnSSHKeysPush(c transaction.TxnCtx) error { diff --git a/plugins/glustershd/rest.go b/plugins/glustershd/rest.go index 84a02bd30..2f54bef62 100644 --- a/plugins/glustershd/rest.go +++ b/plugins/glustershd/rest.go @@ -38,8 +38,8 @@ func runGlfshealBin(volname string, args []string) (string, error) { cmd := exec.Command(path, args...) cmd.Stdout = &out - err = cmd.Run() - if err != nil { + + if err = cmd.Run(); err != nil { return healInfoOutput, err } @@ -103,7 +103,7 @@ func selfhealInfoHandler(w http.ResponseWriter, r *http.Request) { } healInfoOutput, err := getHealInfo(volname, option) if err != nil { - logger.WithField("volname", volname).Debug("heal info operation failed") + logger.WithError(err).WithField("volname", volname).Error("heal info operation failed") restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, "heal info operation failed") return } From 94b631c2f7be24ae11589cfd6c1c3228aeb833d0 Mon Sep 17 00:00:00 2001 From: vpandey-RH Date: Tue, 26 Jun 2018 12:07:56 +0530 Subject: [PATCH 7/7] Index/Full Heal API and CLI --- e2e/glustershd_test.go | 14 +++- glustercli/cmd/glustershd.go | 34 +++++++++ pkg/restclient/glustershd.go | 15 ++++ plugins/glustershd/glustershd.go | 11 ++- plugins/glustershd/init.go | 12 ++- plugins/glustershd/rest.go | 82 ++++++++++++++++++++- plugins/glustershd/transactions.go | 114 +++++++++++++++++++++++++++++ 7 files changed, 272 insertions(+), 10 deletions(-) create mode 100644 plugins/glustershd/transactions.go diff --git a/e2e/glustershd_test.go b/e2e/glustershd_test.go index 817fa8c47..facdc6aed 100644 --- a/e2e/glustershd_test.go +++ b/e2e/glustershd_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestSelfHealInfo(t *testing.T) { +func TestSelfHeal(t *testing.T) { r := require.New(t) gds, err := setupCluster("./config/1.toml") @@ -55,9 +55,17 @@ func TestSelfHealInfo(t *testing.T) { _, err = client.SelfHealInfo(vol1.Name, "split-brain-info") r.Nil(err) + var optionReq api.VolOptionReq + + optionReq.Options = map[string]string{"replicate.self-heal-daemon": "on"} + optionReq.Advanced = true + + r.Nil(client.VolumeSet(vol1.Name, optionReq)) + r.Nil(client.SelfHeal(vol1.Name, "index")) + r.Nil(client.SelfHeal(vol1.Name, "full")) + // Stop Volume r.Nil(client.VolumeStop(vol1.Name), "Volume stop failed") // delete volume - err = client.VolumeDelete(vol1.Name) - r.Nil(err) + r.Nil(client.VolumeDelete(vol1.Name)) } diff --git a/glustercli/cmd/glustershd.go b/glustercli/cmd/glustershd.go index 73d6d3e94..79318699b 100644 --- a/glustercli/cmd/glustershd.go +++ b/glustercli/cmd/glustershd.go @@ -26,6 +26,8 @@ func init() { selfHealInfoCmd.Flags().BoolVar(&flagSummaryInfo, "info-summary", false, "Heal Info Summary") selfHealInfoCmd.Flags().BoolVar(&flagSplitBrainInfo, "split-brain-info", false, "Heal Split Brain Info") selfHealCmd.AddCommand(selfHealInfoCmd) + selfHealCmd.AddCommand(selfHealIndexCmd) + selfHealCmd.AddCommand(selfHealFullCmd) volumeCmd.AddCommand(selfHealCmd) } @@ -82,3 +84,35 @@ var selfHealInfoCmd = &cobra.Command{ } }, } + +var selfHealIndexCmd = &cobra.Command{ + Use: "index ", + Short: "Index 
Heal", + Long: "CLI command to trigger index heal on a volume", + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + var err error + volname := args[0] + err = client.SelfHeal(volname, "index") + if err != nil { + failure(fmt.Sprintf("Failed to run heal for volume %s\n", volname), err, 1) + } + fmt.Println("Heal on volume has been successfully launched. Use heal info to check status") + }, +} + +var selfHealFullCmd = &cobra.Command{ + Use: "full ", + Short: "Full Heal", + Long: "CLI command to trigger full heal on a volume", + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + var err error + volname := args[0] + err = client.SelfHeal(volname, "full") + if err != nil { + failure(fmt.Sprintf("Failed to run heal for volume %s\n", volname), err, 1) + } + fmt.Println("Heal on volume has been successfully launched. Use heal info to check status") + }, +} diff --git a/pkg/restclient/glustershd.go b/pkg/restclient/glustershd.go index d93882190..7e1e912c5 100644 --- a/pkg/restclient/glustershd.go +++ b/pkg/restclient/glustershd.go @@ -22,3 +22,18 @@ func (c *Client) SelfHealInfo(params ...string) ([]glustershdapi.BrickHealInfo, err := c.get(url, nil, http.StatusOK, &output) return output, err } + +// SelfHeal sends request to start the heal process on the specified volname +func (c *Client) SelfHeal(volname string, healType string) error { + var url string + switch healType { + case "index": + url = fmt.Sprintf("/v1/volumes/%s/heal", volname) + case "full": + url = fmt.Sprintf("/v1/volumes/%s/heal?type=%s", volname, healType) + default: + return errors.New("invalid parameters") + } + + return c.post(url, nil, http.StatusOK, nil) +} diff --git a/plugins/glustershd/glustershd.go b/plugins/glustershd/glustershd.go index 18c652a94..578c48cf8 100644 --- a/plugins/glustershd/glustershd.go +++ b/plugins/glustershd/glustershd.go @@ -44,15 +44,13 @@ func (shd *Glustershd) Args() []string { volFileID := "gluster/glustershd" logFile := path.Join(config.GetString("logdir"), "glusterfs", "glustershd.log") - glusterdSockDir := config.GetString("rundir") - socketfilepath := fmt.Sprintf("%s/%x.socket", glusterdSockDir, xxhash.Sum64String(gdctx.MyUUID.String())) shd.args = []string{} shd.args = append(shd.args, "-s", shost) shd.args = append(shd.args, "--volfile-id", volFileID) shd.args = append(shd.args, "-p", shd.PidFile()) shd.args = append(shd.args, "-l", logFile) - shd.args = append(shd.args, "-S", socketfilepath) + shd.args = append(shd.args, "-S", shd.SocketFile()) shd.args = append(shd.args, "--xlator-option", fmt.Sprintf("*replicate*.node-uuid=%s", gdctx.MyUUID)) @@ -62,7 +60,12 @@ func (shd *Glustershd) Args() []string { // SocketFile returns path to the socket file used for IPC. func (shd *Glustershd) SocketFile() string { - return "" + if shd.socketfilepath != "" { + return shd.socketfilepath + } + shd.socketfilepath = fmt.Sprintf("%s/shd-%x.socket", config.GetString("rundir"), xxhash.Sum64String(gdctx.MyUUID.String())) + + return shd.socketfilepath } // PidFile returns path to the pid file of self heal process. 
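The SelfHeal client method added to pkg/restclient above maps the heal type onto the REST route registered in plugins/glustershd/init.go below: a plain POST to /v1/volumes/<volname>/heal triggers an index heal, while /v1/volumes/<volname>/heal?type=full triggers a full heal. The sketch here is for illustration only; the package name, the helper function, and the way the *restclient.Client is obtained are assumptions, not part of the patch.

    package example

    import (
    	"fmt"

    	"github.com/gluster/glusterd2/pkg/restclient"
    )

    // triggerHeal launches an index or full heal on volname using the
    // SelfHeal method introduced in this patch. healType must be "index"
    // or "full"; any other value is rejected by SelfHeal itself.
    func triggerHeal(c *restclient.Client, volname, healType string) error {
    	// index -> POST /v1/volumes/<volname>/heal
    	// full  -> POST /v1/volumes/<volname>/heal?type=full
    	if err := c.SelfHeal(volname, healType); err != nil {
    		return fmt.Errorf("failed to trigger %s heal on volume %s: %v", healType, volname, err)
    	}
    	fmt.Printf("%s heal launched on volume %s; use heal-info to track progress\n", healType, volname)
    	return nil
    }
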
diff --git a/plugins/glustershd/init.go b/plugins/glustershd/init.go index 8a05ac5db..a1e7bcd6e 100644 --- a/plugins/glustershd/init.go +++ b/plugins/glustershd/init.go @@ -2,6 +2,7 @@ package glustershd import ( "github.com/gluster/glusterd2/glusterd2/servers/rest/route" + "github.com/gluster/glusterd2/glusterd2/transaction" "github.com/gluster/glusterd2/pkg/utils" glustershdapi "github.com/gluster/glusterd2/plugins/glustershd/api" ) @@ -21,21 +22,28 @@ func (p *Plugin) RestRoutes() route.Routes { route.Route{ Name: "SelfHealInfo", Method: "GET", - Pattern: "/volumes/{name}/{opts}/heal-info", + Pattern: "/volumes/{volname}/{opts}/heal-info", Version: 1, ResponseType: utils.GetTypeString(([]glustershdapi.BrickHealInfo)(nil)), HandlerFunc: selfhealInfoHandler}, route.Route{ Name: "SelfHealInfo2", Method: "GET", - Pattern: "/volumes/{name}/heal-info", + Pattern: "/volumes/{volname}/heal-info", Version: 1, ResponseType: utils.GetTypeString(([]glustershdapi.BrickHealInfo)(nil)), HandlerFunc: selfhealInfoHandler}, + route.Route{ + Name: "SelfHeal", + Method: "POST", + Pattern: "/volumes/{volname}/heal", + Version: 1, + HandlerFunc: selfHealHandler}, } } // RegisterStepFuncs registers transaction step functions with // Glusterd Transaction framework func (p *Plugin) RegisterStepFuncs() { + transaction.RegisterStepFunc(txnSelfHeal, "selfheal.Heal") } diff --git a/plugins/glustershd/rest.go b/plugins/glustershd/rest.go index 2f54bef62..9cfa3adb6 100644 --- a/plugins/glustershd/rest.go +++ b/plugins/glustershd/rest.go @@ -20,6 +20,13 @@ import ( config "github.com/spf13/viper" ) +type healTypes int8 + +const ( + indexHeal healTypes = 1 + iota + fullHeal +) + func runGlfshealBin(volname string, args []string) (string, error) { var out bytes.Buffer var buffer bytes.Buffer @@ -59,7 +66,7 @@ func getHealInfo(volname string, option string) (string, error) { func selfhealInfoHandler(w http.ResponseWriter, r *http.Request) { var option string p := mux.Vars(r) - volname := p["name"] + volname := p["volname"] if val, ok := p["opts"]; ok { option = val } @@ -121,3 +128,76 @@ func selfhealInfoHandler(w http.ResponseWriter, r *http.Request) { restutils.SendHTTPResponse(ctx, w, http.StatusOK, &info.Bricks) } + +func selfHealHandler(w http.ResponseWriter, r *http.Request) { + // Collect inputs from URL + volname := mux.Vars(r)["volname"] + + ctx := r.Context() + logger := gdctx.GetReqLogger(ctx) + + healType := indexHeal + if heal, ok := r.URL.Query()["type"]; ok { + switch heal[0] { + case "index": + healType = indexHeal + case "full": + healType = fullHeal + default: + restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "heal type can only be either index or full") + return + } + } + txn, err := transaction.NewTxnWithLocks(ctx, volname) + if err != nil { + status, err := restutils.ErrToStatusCode(err) + restutils.SendHTTPError(ctx, w, status, err) + return + } + defer txn.Done() + + // Validate volume existence + volinfo, err := volume.GetVolume(volname) + if err != nil { + status, err := restutils.ErrToStatusCode(err) + restutils.SendHTTPError(ctx, w, status, err) + return + } + + // Check if volume is started + if volinfo.State != volume.VolStarted { + restutils.SendHTTPError(ctx, w, http.StatusBadRequest, gderrors.ErrVolNotStarted) + return + } + + // Check if self heal is already enabled + if !isHealEnabled(volinfo) { + restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "self heal option is disabled for this volume") + return + } + + if err := txn.Ctx.Set("volinfo", 
volinfo); err != nil { + restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err) + return + } + + if err := txn.Ctx.Set("healType", healType); err != nil { + restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err) + return + } + + txn.Steps = []*transaction.Step{ + { + DoFunc: "selfheal.Heal", + Nodes: volinfo.Nodes(), + }, + } + + if err = txn.Do(); err != nil { + logger.WithError(err).Error("failed to start healing process") + status, err := restutils.ErrToStatusCode(err) + restutils.SendHTTPError(ctx, w, status, err) + return + } + restutils.SendHTTPResponse(ctx, w, http.StatusOK, nil) +} diff --git a/plugins/glustershd/transactions.go b/plugins/glustershd/transactions.go new file mode 100644 index 000000000..c0e73f379 --- /dev/null +++ b/plugins/glustershd/transactions.go @@ -0,0 +1,114 @@ +package glustershd + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/gluster/glusterd2/glusterd2/brick" + "github.com/gluster/glusterd2/glusterd2/daemon" + "github.com/gluster/glusterd2/glusterd2/gdctx" + "github.com/gluster/glusterd2/glusterd2/servers/sunrpc/dict" + "github.com/gluster/glusterd2/glusterd2/transaction" + "github.com/gluster/glusterd2/glusterd2/volume" +) + +func getHxlChildrenCount(volinfo *volume.Volinfo) (int, string) { + if volinfo.Type == volume.Replicate || volinfo.Type == volume.DistReplicate { + return volinfo.Subvols[0].ReplicaCount, "replicate" + } + return volinfo.Subvols[0].DisperseCount, "disperse" +} + +func addHxlatorToDict(reqDict map[string]string, volinfo *volume.Volinfo, index int, count int, xlType string) map[string]string { + key := fmt.Sprintf("xl-%d", count) + xName := fmt.Sprintf("%s-%s-%d", volinfo.Name, xlType, index) + reqDict[key] = xName + reqDict[xName] = strconv.Itoa(index) + return reqDict +} + +func selectHxlatorsWithBricks(volinfo *volume.Volinfo, healType int) map[string]string { + index := 1 + hxlatorCount := 0 + add := false + reqDict := make(map[string]string) + reqDict["heal-op"] = strconv.Itoa(healType) + reqDict["xl-op"] = reqDict["heal-op"] + reqDict["volname"] = volinfo.Name + reqDict["sync-mgmt-operation"] = strconv.Itoa(20) + reqDict["vol-id"] = volinfo.ID.String() + hxlChildren, xlType := getHxlChildrenCount(volinfo) + volBricks := volinfo.GetBricks() + for brick := range volBricks { + hostKey := fmt.Sprintf("%d-hostname", index-1) + brickPathKey := fmt.Sprintf("%d-path", index-1) + reqDict[hostKey] = volBricks[brick].Hostname + reqDict[brickPathKey] = volBricks[brick].Path + if bytes.Equal(volBricks[brick].PeerID, gdctx.MyUUID) { + add = true + } + if index%hxlChildren == 0 { + if add { + reqDict = addHxlatorToDict(reqDict, volinfo, (index-1)/hxlChildren, hxlatorCount, xlType) + hxlatorCount++ + } + add = false + } + index++ + } + reqDict["count"] = strconv.Itoa(hxlatorCount) + return reqDict +} + +func txnSelfHeal(c transaction.TxnCtx) error { + + var volinfo volume.Volinfo + if err := c.Get("volinfo", &volinfo); err != nil { + return err + } + + var healType int + if err := c.Get("healType", &healType); err != nil { + return err + } + + volname := volinfo.Name + + glustershDaemon, err := newGlustershd() + if err != nil { + return err + } + + c.Logger().WithField("volume", volname).Info("Starting Heal") + + client, err := daemon.GetRPCClient(glustershDaemon) + if err != nil { + c.Logger().WithError(err).WithField( + "volume", volname).Error("failed to connect to glustershd") + return err + } + + reqDict := make(map[string]string) + req := &brick.GfBrickOpReq{ + Name: "", + 
Op: int(brick.OpBrickXlatorOp), + } + reqDict = selectHxlatorsWithBricks(&volinfo, healType) + req.Input, err = dict.Serialize(reqDict) + if err != nil { + c.Logger().WithError(err).WithField( + "volume", volname).Error("failed to serialize dict for index heal") + return err + } + + var rsp brick.GfBrickOpRsp + err = client.Call("Brick.OpBrickXlatorOp", req, &rsp) + if err != nil || rsp.OpRet != 0 { + c.Logger().WithError(err).WithField( + "volume", volname).Error("failed to send index heal RPC") + return err + } + + return nil +}
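
As a closing illustration of the key-splitting behaviour introduced in PATCH 4/7, the sketch below exercises SplitKey() with the three kinds of keys discussed in that patch: a plain <xlator>.<name> key, a key with an explicit graph prefix, and a key whose option name itself contains dots (the form the TLS group option from PATCH 5/7 relies on). The expected results in the comments are derived from the new SplitKey code shown above; the snippet itself is illustrative and not part of the series.

    package main

    import (
    	"fmt"

    	"github.com/gluster/glusterd2/glusterd2/xlator/options"
    )

    func main() {
    	keys := []string{
    		"afr.eager-lock",                      // plain <xlator>.<name>
    		"gfproxy.afr.eager-lock",              // "gfproxy" is a valid graph and is split off
    		"server.transport.socket.ssl-enabled", // option name itself contains dots
    	}

    	for _, k := range keys {
    		graph, xlator, name, err := options.SplitKey(k)
    		if err != nil {
    			fmt.Printf("%-40s -> error: %v\n", k, err)
    			continue
    		}
    		fmt.Printf("%-40s -> graph=%q xlator=%q name=%q\n", k, graph, xlator, name)
    	}

    	// Expected, per the new SplitKey logic:
    	//   afr.eager-lock                      -> graph="" xlator="afr" name="eager-lock"
    	//   gfproxy.afr.eager-lock              -> graph="gfproxy" xlator="afr" name="eager-lock"
    	//   server.transport.socket.ssl-enabled -> graph="" xlator="server" name="transport.socket.ssl-enabled"
    }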