diff --git a/e2e/georep_test.go b/e2e/georep_test.go index a45aa88f9..6312e8e86 100644 --- a/e2e/georep_test.go +++ b/e2e/georep_test.go @@ -17,7 +17,7 @@ func TestGeorepCreateDelete(t *testing.T) { r.Nil(err) defer teardownCluster(gds) - brickDir, err := ioutil.TempDir("", "TestGeorepCreate") + brickDir, err := ioutil.TempDir(baseWorkdir, t.Name()) r.Nil(err) defer os.RemoveAll(brickDir) diff --git a/e2e/glustershd_test.go b/e2e/glustershd_test.go index b975429d9..3a9b9e00a 100644 --- a/e2e/glustershd_test.go +++ b/e2e/glustershd_test.go @@ -16,7 +16,7 @@ func TestSelfHealInfo(t *testing.T) { r.Nil(err) defer teardownCluster(gds) - brickDir, err := ioutil.TempDir("", t.Name()) + brickDir, err := ioutil.TempDir(baseWorkdir, t.Name()) r.Nil(err) defer os.RemoveAll(brickDir) diff --git a/e2e/quota_enable.go b/e2e/quota_enable.go index 659d5650c..9dcd4342e 100644 --- a/e2e/quota_enable.go +++ b/e2e/quota_enable.go @@ -20,7 +20,7 @@ func testQuotaEnable(t *testing.T) { r.Nil(err) defer teardownCluster(gds) - brickDir, err := ioutil.TempDir("", t.Name()) + brickDir, err := ioutil.TempDir(baseWorkdir, t.Name()) r.Nil(err) defer os.RemoveAll(brickDir) diff --git a/e2e/restart_test.go b/e2e/restart_test.go index f1eac6696..d37851ddd 100644 --- a/e2e/restart_test.go +++ b/e2e/restart_test.go @@ -17,7 +17,7 @@ func TestRestart(t *testing.T) { r.Nil(err) r.True(gd.IsRunning()) - dir, err := ioutil.TempDir("", "") + dir, err := ioutil.TempDir(baseWorkdir, t.Name()) r.Nil(err) defer os.RemoveAll(dir) diff --git a/e2e/smartvol_ops_test.go b/e2e/smartvol_ops_test.go index dfe1f9df6..890528f3f 100644 --- a/e2e/smartvol_ops_test.go +++ b/e2e/smartvol_ops_test.go @@ -226,15 +226,15 @@ func TestSmartVolume(t *testing.T) { client = initRestclient(gds[0].ClientAddress) - tmpDir, err = ioutil.TempDir("", t.Name()) + devicesDir, err := ioutil.TempDir(baseWorkdir, t.Name()) r.Nil(err) - t.Logf("Using temp dir: %s", tmpDir) + t.Logf("Using temp dir: %s", devicesDir) // Device Setup // 
Around 150MB will be reserved during pv/vg creation, create device with more size - r.Nil(prepareLoopDevice(baseWorkdir+"/gluster_dev1.img", "1", "400M")) - r.Nil(prepareLoopDevice(baseWorkdir+"/gluster_dev2.img", "2", "400M")) - r.Nil(prepareLoopDevice(baseWorkdir+"/gluster_dev3.img", "3", "400M")) + r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev1.img", "1", "400M")) + r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev2.img", "2", "400M")) + r.Nil(prepareLoopDevice(devicesDir+"/gluster_dev3.img", "3", "400M")) _, err = client.DeviceAdd(gds[0].PeerID(), "/dev/gluster_loop1") r.Nil(err) diff --git a/e2e/utils_test.go b/e2e/utils_test.go index f8828f76d..bf77126a8 100644 --- a/e2e/utils_test.go +++ b/e2e/utils_test.go @@ -26,6 +26,7 @@ type gdProcess struct { ClientAddress string `toml:"clientaddress"` PeerAddress string `toml:"peeraddress"` Workdir string `toml:"workdir"` + LocalStateDir string `toml:"localstatedir"` Rundir string `toml:"rundir"` uuid string } @@ -41,7 +42,7 @@ func (g *gdProcess) Stop() error { return g.Cmd.Process.Kill() } -func (g *gdProcess) UpdateDirs() { +func (g *gdProcess) updateDirs() { g.Workdir = path.Clean(g.Workdir) if !path.IsAbs(g.Workdir) { g.Workdir = path.Join(baseWorkdir, g.Workdir) @@ -50,6 +51,10 @@ func (g *gdProcess) UpdateDirs() { if !path.IsAbs(g.Rundir) { g.Rundir = path.Join(baseWorkdir, g.Rundir) } + g.LocalStateDir = path.Clean(g.LocalStateDir) + if !path.IsAbs(g.LocalStateDir) { + g.LocalStateDir = path.Join(baseWorkdir, g.LocalStateDir) + } } func (g *gdProcess) EraseWorkdir() error { @@ -117,7 +122,10 @@ func spawnGlusterd(configFilePath string, cleanStart bool) (*gdProcess, error) { return nil, err } - g.UpdateDirs() + // The config files in e2e/config contain relative paths, convert them + // to absolute paths. 
+ g.updateDirs() + if cleanStart { g.EraseWorkdir() // cleanup leftovers from previous test } @@ -133,6 +141,7 @@ func spawnGlusterd(configFilePath string, cleanStart bool) (*gdProcess, error) { g.Cmd = exec.Command(path.Join(binDir, "glusterd2"), "--config", absConfigFilePath, "--workdir", g.Workdir, + "--localstatedir", g.LocalStateDir, "--rundir", g.Rundir, "--logdir", path.Join(g.Workdir, "log"), "--logfile", "glusterd2.log") @@ -320,7 +329,6 @@ func loopDevicesCleanup(t *testing.T) error { cleanupAllBrickMounts(t) cleanupAllGlusterVgs(t) cleanupAllGlusterPvs(t) - cleanupAllGlusterPvs(t) // Cleanup device files devicefiles, err := filepath.Glob(baseWorkdir + "/*.img") diff --git a/glustercli/cmd/volume.go b/glustercli/cmd/volume.go index d7108eff4..38140c918 100644 --- a/glustercli/cmd/volume.go +++ b/glustercli/cmd/volume.go @@ -26,6 +26,7 @@ const ( helpVolumeInfoCmd = "Get Gluster Volume Info" helpVolumeListCmd = "List all Gluster Volumes" helpVolumeStatusCmd = "Get Gluster Volume Status" + helpVolumeSizeCmd = "Get Gluster Volume Size Usage" helpVolumeExpandCmd = "Expand a Gluster Volume" helpVolumeEditCmd = "Edit metadata (key-value pairs) of a volume. 
Glusterd2 will not interpret these key and value in any way" ) @@ -67,6 +68,7 @@ func init() { volumeCmd.AddCommand(volumeGetCmd) volumeCmd.AddCommand(volumeResetCmd) + volumeCmd.AddCommand(volumeSizeCmd) volumeInfoCmd.Flags().StringVar(&flagCmdFilterKey, "key", "", "Filter by metadata key") volumeInfoCmd.Flags().StringVar(&flagCmdFilterValue, "value", "", "Filter by metadata value") @@ -432,6 +434,28 @@ var volumeStatusCmd = &cobra.Command{ }, } +var volumeSizeCmd = &cobra.Command{ + Use: "size", + Short: helpVolumeSizeCmd, + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + volname := cmd.Flags().Args()[0] + vol, err := client.VolumeStatus(volname) + if err != nil { + if verbose { + log.WithFields(log.Fields{ + "error": err.Error(), + }).Error("error getting volume size") + } + failure("Error getting Volume size", err, 1) + } + fmt.Println("Volume:", volname) + fmt.Printf("Capacity: %d bytes\n", vol.Size.Capacity) + fmt.Printf("Used: %d bytes\n", vol.Size.Used) + fmt.Printf("Free: %d bytes\n", vol.Size.Free) + }, +} + var volumeExpandCmd = &cobra.Command{ Use: "add-brick", Short: helpVolumeExpandCmd, diff --git a/plugins/device/deviceutils/store-utils.go b/plugins/device/deviceutils/store-utils.go index 3cfc79317..ebf33fd4c 100644 --- a/plugins/device/deviceutils/store-utils.go +++ b/plugins/device/deviceutils/store-utils.go @@ -28,22 +28,31 @@ func GetDevices(peerIds ...string) ([]deviceapi.Info, error) { return nil, err } } - var devices []deviceapi.Info for _, peerInfo := range peers { - var deviceInfo []deviceapi.Info - if len(peerInfo.Metadata["_devices"]) > 0 { - if err := json.Unmarshal([]byte(peerInfo.Metadata["_devices"]), &deviceInfo); err != nil { - return nil, err - } - devices = append(devices, deviceInfo...) + deviceInfo, err := GetDevicesFromPeer(peerInfo) + if err != nil { + return nil, err } + devices = append(devices, deviceInfo...) 
} return devices, nil } -// DeviceExist checks the given device existence -func DeviceExist(reqDevice string, devices []deviceapi.Info) bool { +// GetDevicesFromPeer returns devices from peer object. +func GetDevicesFromPeer(peerInfo *peer.Peer) ([]deviceapi.Info, error) { + + var deviceInfo []deviceapi.Info + if _, exists := peerInfo.Metadata["_devices"]; exists { + if err := json.Unmarshal([]byte(peerInfo.Metadata["_devices"]), &deviceInfo); err != nil { + return nil, err + } + } + return deviceInfo, nil +} + +// DeviceInList checks whether the given device is in list of devices or not. +func DeviceInList(reqDevice string, devices []deviceapi.Info) bool { for _, key := range devices { if reqDevice == key.Name { return true diff --git a/plugins/device/rest.go b/plugins/device/rest.go index 147e15344..f9e6e34b3 100644 --- a/plugins/device/rest.go +++ b/plugins/device/rest.go @@ -1,7 +1,6 @@ package device import ( - "encoding/json" "net/http" "github.com/gluster/glusterd2/glusterd2/gdctx" @@ -52,19 +51,17 @@ func deviceAddHandler(w http.ResponseWriter, r *http.Request) { return } - if _, exists := peerInfo.Metadata["_devices"]; exists { - var devices []deviceapi.Info - err = json.Unmarshal([]byte(peerInfo.Metadata["_devices"]), &devices) - if err != nil { - logger.WithError(err).WithField("peerid", peerID).Error(err) - restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err) - return - } - if deviceutils.DeviceExist(req.Device, devices) { - logger.WithError(err).WithField("device", req.Device).Error("Device already exists") - restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "Device already exists") - return - } + devices, err := deviceutils.GetDevicesFromPeer(peerInfo) + if err != nil { + logger.WithError(err).WithField("peerid", peerID).Error("Failed to get device from Peer") + restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err) + return + } + + if deviceutils.DeviceInList(req.Device, devices) { + 
logger.WithField("device", req.Device).Error("Device already exists") + restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "Device already exists") + return } txn.Nodes = []uuid.UUID{peerInfo.ID}