This repository has been archived by the owner on Mar 26, 2020. It is now read-only.

Plugin architecture for brick Provisioners #1256

Open · wants to merge 1 commit into base: master
2 changes: 1 addition & 1 deletion e2e/smartvol_ops_test.go
@@ -29,7 +29,7 @@ func brickSizeTest(brickpath string, min uint64, max uint64) error {

func checkZeroLvs(r *require.Assertions) {
for i := 1; i < 3; i++ {
- nlv, err := numberOfLvs(fmt.Sprintf("vg-dev-gluster_loop%d", i))
+ nlv, err := numberOfLvs(fmt.Sprintf("gluster-dev-gluster_loop%d", i))
r.Nil(err)
if err == nil {
r.Equal(0, nlv)
2 changes: 2 additions & 0 deletions glusterd2/brick/types.go
@@ -33,6 +33,7 @@ type MountInfo struct {
// Brickinfo is the static information about the brick
type Brickinfo struct {
ID uuid.UUID
+ Name string
Contributor:

What is stored in this variable?

Member Author:

<volname>_s<subvol-number>_b<brick-number>, similar to the subvolume name.

Hostname string
PeerID uuid.UUID
Path string
Contributor:

Does this path represent the path where the brick is mounted?

@@ -42,6 +43,7 @@ type Brickinfo struct {
Type Type
Decommissioned bool
PType ProvisionType
+ Device string
MountInfo
}

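As a side note for the review thread above: a minimal, self-contained sketch of how a name in the <volname>_s<subvol-number>_b<brick-number> form can be built. The brickName helper and the sample values here are illustrative only; in the PR the name is composed directly in bricksplanner with fmt.Sprintf("%s_s%d_b%d", ...).

package main

import "fmt"

// brickName builds a brick name of the form <volname>_s<subvol>_b<brick>,
// matching the convention the author describes; indices are 1-based.
func brickName(volname string, subvol, brick int) string {
	return fmt.Sprintf("%s_s%d_b%d", volname, subvol, brick)
}

func main() {
	// Second brick of the first subvolume of a volume named "gv0".
	fmt.Println(brickName("gv0", 1, 2)) // prints: gv0_s1_b2
}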
34 changes: 15 additions & 19 deletions glusterd2/bricksplanner/planner.go
@@ -7,7 +7,7 @@ import (

"github.com/gluster/glusterd2/glusterd2/volume"
"github.com/gluster/glusterd2/pkg/api"
"github.com/gluster/glusterd2/plugins/device/deviceutils"
"github.com/gluster/glusterd2/pkg/lvmutils"

config "github.com/spf13/viper"
)
@@ -76,7 +76,7 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {

// User input will be in MBs, convert to KBs for all
// internal usage
- subvolSize := deviceutils.MbToKb(req.Size)
+ subvolSize := lvmutils.MbToKb(req.Size)
if numSubvols > 1 {
subvolSize = subvolSize / uint64(numSubvols)
}
@@ -118,19 +118,13 @@ func getBricksLayout(req *api.VolCreateReq) ([]api.SubvolReq, error) {
for j := 0; j < subvolplanner.BricksCount(); j++ {
eachBrickSize := subvolplanner.BrickSize(j)
brickType := subvolplanner.BrickType(j)
- eachBrickTpSize := uint64(float64(eachBrickSize) * req.SnapshotReserveFactor)

bricks = append(bricks, api.BrickReq{
- Type: brickType,
- Path: fmt.Sprintf("%s/%s/subvol%d/brick%d/brick", bricksMountRoot, req.Name, i+1, j+1),
- Mountdir: "/brick",
- TpName: fmt.Sprintf("tp_%s_s%d_b%d", req.Name, i+1, j+1),
- LvName: fmt.Sprintf("brick_%s_s%d_b%d", req.Name, i+1, j+1),
- Size: eachBrickSize,
- TpSize: eachBrickTpSize,
- TpMetadataSize: deviceutils.GetPoolMetadataSize(eachBrickTpSize),
- FsType: "xfs",
- MntOpts: "rw,inode64,noatime,nouuid",
+ Type: brickType,
+ Path: fmt.Sprintf("%s/%s/subvol%d/brick%d/brick", bricksMountRoot, req.Name, i+1, j+1),
+ Mountdir: "/brick",
+ Name: fmt.Sprintf("%s_s%d_b%d", req.Name, i+1, j+1),
+ Size: eachBrickSize,
})
}

@@ -177,14 +171,15 @@ func PlanBricks(req *api.VolCreateReq) error {
// with device with expected space available.
numBricksAllocated := 0
for bidx, b := range sv.Bricks {
- totalsize := b.TpSize + b.TpMetadataSize
+ tpSize := uint64(float64(b.Size) * req.SnapshotReserveFactor)
+ tpMetadataSize := lvmutils.GetTpMetadataSize(tpSize)
+ totalsize := tpSize + tpMetadataSize

for _, vg := range availableVgs {
_, zoneUsed := zones[vg.Zone]
if vg.AvailableSize >= totalsize && !zoneUsed && !vg.Used {
subvols[idx].Bricks[bidx].PeerID = vg.PeerID
- subvols[idx].Bricks[bidx].VgName = vg.Name
- subvols[idx].Bricks[bidx].DevicePath = "/dev/" + vg.Name + "/" + b.LvName
+ subvols[idx].Bricks[bidx].Device = vg.Device

zones[vg.Zone] = struct{}{}
numBricksAllocated++
@@ -205,14 +200,15 @@ func PlanBricks(req *api.VolCreateReq) error {
// but enough space is available in the devices
for bidx := numBricksAllocated; bidx < len(sv.Bricks); bidx++ {
b := sv.Bricks[bidx]
- totalsize := b.TpSize + b.TpMetadataSize
+ tpSize := uint64(float64(b.Size) * req.SnapshotReserveFactor)
+ tpMetadataSize := lvmutils.GetTpMetadataSize(tpSize)
+ totalsize := tpSize + tpMetadataSize

for _, vg := range availableVgs {
_, zoneUsed := zones[vg.Zone]
if vg.AvailableSize >= totalsize && !zoneUsed {
subvols[idx].Bricks[bidx].PeerID = vg.PeerID
- subvols[idx].Bricks[bidx].VgName = vg.Name
- subvols[idx].Bricks[bidx].DevicePath = "/dev/" + vg.Name + "/" + b.LvName
+ subvols[idx].Bricks[bidx].Device = vg.Device

zones[vg.Zone] = struct{}{}
numBricksAllocated++
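To make the new sizing logic above easier to follow: the thin-pool size for each brick is now derived inside PlanBricks as brick size × SnapshotReserveFactor, plus the pool metadata size from lvmutils.GetTpMetadataSize, and a VG qualifies only if its free space covers that total. The sketch below mirrors that check; GetTpMetadataSize is not part of this diff, so the metadataSize stand-in (roughly 0.5% of the pool) is an assumption for illustration only.

package main

import "fmt"

// metadataSize is a stand-in for lvmutils.GetTpMetadataSize; the real
// formula lives in pkg/lvmutils and may differ from this approximation.
func metadataSize(tpSize uint64) uint64 {
	return tpSize / 200 // assume ~0.5% of the thin-pool size
}

// fitsOnVg reports whether a VG with vgAvailable free space (in KB) can
// hold a brick of brickSize (in KB) once the snapshot reserve and thin-pool
// metadata are accounted for, mirroring the check in PlanBricks.
func fitsOnVg(brickSize, vgAvailable uint64, snapshotReserveFactor float64) bool {
	tpSize := uint64(float64(brickSize) * snapshotReserveFactor)
	totalsize := tpSize + metadataSize(tpSize)
	return vgAvailable >= totalsize
}

func main() {
	// 10 GiB brick, reserve factor 1.2, VG with 15 GiB free (all in KB).
	fmt.Println(fitsOnVg(10*1024*1024, 15*1024*1024, 1.2)) // true
}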
18 changes: 5 additions & 13 deletions glusterd2/bricksplanner/utils.go
@@ -1,7 +1,6 @@
package bricksplanner

import (
"encoding/json"
"sort"
"strings"

@@ -10,6 +9,7 @@ import (
"github.com/gluster/glusterd2/pkg/api"
"github.com/gluster/glusterd2/pkg/utils"
deviceapi "github.com/gluster/glusterd2/plugins/device/api"
"github.com/gluster/glusterd2/plugins/device/deviceutils"
)

var subvolPlanners = make(map[string]SubvolPlanner)
@@ -24,8 +24,7 @@ type SubvolPlanner interface {

// Vg represents Virtual Volume Group
type Vg struct {
- Name string
- DeviceName string
+ Device string
PeerID string
Zone string
State string
@@ -71,14 +70,8 @@ func getAvailableVgs(req *api.VolCreateReq) ([]Vg, error) {
continue
}

- devicesRaw, exists := p.Metadata["_devices"]
- if !exists {
- // No device registered for this peer
- continue
- }
-
- var deviceInfo []deviceapi.Info
- if err := json.Unmarshal([]byte(devicesRaw), &deviceInfo); err != nil {
+ deviceInfo, err := deviceutils.GetDevices(p.ID.String())
+ if err != nil {
return nil, err
}

@@ -89,8 +82,7 @@
}

vgs = append(vgs, Vg{
- DeviceName: d.Name,
- Name: d.VgName,
+ Device: d.Device,
PeerID: p.ID.String(),
Zone: peerzone,
State: d.State,
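For context on the change above: getAvailableVgs no longer unmarshals the peer's "_devices" metadata by hand; it asks the device plugin via deviceutils.GetDevices and keeps only devices that can host a brick. A trimmed sketch of that selection follows; the deviceInfo/vg stand-ins carry only the fields visible in this diff, and the "enabled" state check is an assumption, since the actual filter lines are collapsed out of the hunk.

package main

import "fmt"

// deviceInfo is a trimmed stand-in for deviceapi.Info with only the fields
// used in this sketch.
type deviceInfo struct {
	Device        string
	State         string
	AvailableSize uint64
}

// vg is a trimmed stand-in for the planner's Vg type after this change.
type vg struct {
	Device        string
	PeerID        string
	Zone          string
	State         string
	AvailableSize uint64
}

// availableVgs keeps only devices that look usable for provisioning,
// mirroring the per-peer loop in getAvailableVgs after it calls
// deviceutils.GetDevices(p.ID.String()).
func availableVgs(peerID, zone string, devices []deviceInfo) []vg {
	var vgs []vg
	for _, d := range devices {
		if d.State != "enabled" || d.AvailableSize == 0 {
			continue // skip failed or full devices (state value assumed)
		}
		vgs = append(vgs, vg{
			Device:        d.Device,
			PeerID:        peerID,
			Zone:          zone,
			State:         d.State,
			AvailableSize: d.AvailableSize,
		})
	}
	return vgs
}

func main() {
	devices := []deviceInfo{
		{Device: "/dev/vdb", State: "enabled", AvailableSize: 100 * 1024 * 1024},
		{Device: "/dev/vdc", State: "failed"},
	}
	fmt.Println(len(availableVgs("peer-1", "zone-a", devices))) // 1
}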
1 change: 1 addition & 0 deletions glusterd2/commands/volumes/volume-create-txn.go
@@ -128,6 +128,7 @@ func newVolinfo(req *api.VolCreateReq) (*volume.Volinfo, error) {
DistCount: len(req.Subvols),
SnapList: []string{},
SnapshotReserveFactor: req.SnapshotReserveFactor,
+ Provisioner: req.Provisioner,
Auth: volume.VolAuth{
Username: uuid.NewRandom().String(),
Password: uuid.NewRandom().String(),
4 changes: 3 additions & 1 deletion glusterd2/commands/volumes/volume-delete.go
@@ -78,7 +78,9 @@ func volumeDeleteHandler(w http.ResponseWriter, r *http.Request) {
return
}

- bricksAutoProvisioned := volinfo.IsAutoProvisioned() || volinfo.IsSnapshotProvisioned()
+ // TODO: Include volinfo.IsSnapshotProvisioned() once
+ // Snapshot integrated with Provisioner interface
+ bricksAutoProvisioned := volinfo.IsAutoProvisioned()
txn.Steps = []*transaction.Step{
{
DoFunc: "vol-delete.CleanBricks",