diff --git a/cmd/minikube/cmd/mount.go b/cmd/minikube/cmd/mount.go
index 4884da27a045..e6bbf048b0a2 100644
--- a/cmd/minikube/cmd/mount.go
+++ b/cmd/minikube/cmd/mount.go
@@ -108,7 +108,6 @@ var mountCmd = &cobra.Command{
 			exit.WithError("Error getting config", err)
 		}
 		host, err := api.Load(cc.Name)
-
 		if err != nil {
 			exit.WithError("Error loading api", err)
 		}
diff --git a/cmd/minikube/cmd/pause.go b/cmd/minikube/cmd/pause.go
new file mode 100644
index 000000000000..e44cc79ed7c1
--- /dev/null
+++ b/cmd/minikube/cmd/pause.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2020 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"os"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+
+	"k8s.io/minikube/pkg/minikube/cluster"
+	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/cruntime"
+	"k8s.io/minikube/pkg/minikube/exit"
+	"k8s.io/minikube/pkg/minikube/machine"
+	"k8s.io/minikube/pkg/minikube/out"
+)
+
+var (
+	namespaces    []string
+	allNamespaces bool
+)
+
+// pauseCmd represents the docker-pause command
+var pauseCmd = &cobra.Command{
+	Use:   "pause",
+	Short: "pause containers",
+	Run:   runPause,
+}
+
+func runPause(cmd *cobra.Command, args []string) {
+	cname := viper.GetString(config.MachineProfile)
+	api, err := machine.NewAPIClient()
+	if err != nil {
+		exit.WithError("Error getting client", err)
+	}
+	defer api.Close()
+	cc, err := config.Load(cname)
+
+	if err != nil && !os.IsNotExist(err) {
+		exit.WithError("Error loading profile config", err)
+	}
+
+	if err != nil {
+		out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname})
+		os.Exit(1)
+	}
+
+	glog.Infof("config: %+v", cc)
+	host, err := cluster.CheckIfHostExistsAndLoad(api, cname)
+	if err != nil {
+		exit.WithError("Error getting host", err)
+	}
+
+	r, err := machine.CommandRunner(host)
+	if err != nil {
+		exit.WithError("Failed to get command runner", err)
+	}
+
+	cr, err := cruntime.New(cruntime.Config{Type: cc.ContainerRuntime, Runner: r})
+	if err != nil {
+		exit.WithError("Failed runtime", err)
+	}
+
+	glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings())
+	if allNamespaces {
+		namespaces = nil //all
+	} else if len(namespaces) == 0 {
+		exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces")
+	}
+
+	ids, err := cluster.Pause(cr, r, namespaces)
+	if err != nil {
+		exit.WithError("Pause", err)
+	}
+
+	if namespaces == nil {
+		out.T(out.Unpause, "Paused kubelet and {{.count}} containers", out.V{"count": len(ids)})
+	} else {
+		out.T(out.Unpause, "Paused kubelet and {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")})
+	}
+}
+
+func init() {
+	pauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to pause")
+	pauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, pause all namespaces")
+}
diff --git a/cmd/minikube/cmd/root.go
b/cmd/minikube/cmd/root.go index 344aee7effeb..ef6fb3e29e1c 100644 --- a/cmd/minikube/cmd/root.go +++ b/cmd/minikube/cmd/root.go @@ -172,6 +172,8 @@ func init() { stopCmd, deleteCmd, dashboardCmd, + pauseCmd, + unpauseCmd, }, }, { diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 91376babb774..64a20bcddfd1 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -19,12 +19,15 @@ package cmd import ( "encoding/json" "fmt" + "io" "os" "strings" "text/template" + "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/state" "github.com/golang/glog" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config" @@ -40,16 +43,13 @@ import ( var statusFormat string var output string -// KubeconfigStatus represents the kubeconfig status -var KubeconfigStatus = struct { - Configured string - Misconfigured string -}{ - Configured: `Configured`, - Misconfigured: `Misconfigured`, -} +const ( + // Additional states used by kubeconfig + Configured = "Configured" // analogous to state.Saved + Misconfigured = "Misconfigured" // analogous to state.Error +) -// Status represents the status +// Status holds string representations of component states type Status struct { Host string Kubelet string @@ -81,7 +81,6 @@ var statusCmd = &cobra.Command{ exit.UsageT("Cannot use both --output and --format options") } - var returnCode = 0 api, err := machine.NewAPIClient() if err != nil { exit.WithCodeT(exit.Unavailable, "Error getting client: {{.error}}", out.V{"error": err}) @@ -89,79 +88,92 @@ var statusCmd = &cobra.Command{ defer api.Close() machineName := viper.GetString(config.MachineProfile) - - hostSt, err := cluster.GetHostStatus(api, machineName) + st, err := status(api, machineName) if err != nil { - exit.WithError("Error getting host status", err) + glog.Errorf("status error: %v", err) } - kubeletSt := state.None.String() - kubeconfigSt := state.None.String() - apiserverSt := state.None.String() - - if hostSt == state.Running.String() { - clusterBootstrapper, err := getClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper)) - if err != nil { - exit.WithError("Error getting bootstrapper", err) + switch strings.ToLower(output) { + case "text": + if err := statusText(st, os.Stdout); err != nil { + exit.WithError("status text failure", err) } - kubeletSt, err = clusterBootstrapper.GetKubeletStatus() - if err != nil { - glog.Warningf("kubelet err: %v", err) - returnCode |= clusterNotRunningStatusFlag - } else if kubeletSt != state.Running.String() { - returnCode |= clusterNotRunningStatusFlag + case "json": + if err := statusJSON(st, os.Stdout); err != nil { + exit.WithError("status json failure", err) } + default: + exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. 
Valid values: 'text', 'json'", output)) + } - ip, err := cluster.GetHostDriverIP(api, machineName) - if err != nil { - glog.Errorln("Error host driver ip status:", err) - } + os.Exit(exitCode(st)) + }, +} - apiserverPort, err := kubeconfig.Port(machineName) - if err != nil { - // Fallback to presuming default apiserver port - apiserverPort = constants.APIServerPort - } +func exitCode(st *Status) int { + c := 0 + if st.Host != state.Running.String() { + c |= minikubeNotRunningStatusFlag + } + if st.APIServer != state.Running.String() || st.Kubelet != state.Running.String() { + c |= clusterNotRunningStatusFlag + } + if st.Kubeconfig != Configured { + c |= k8sNotRunningStatusFlag + } + return c +} - apiserverSt, err = clusterBootstrapper.GetAPIServerStatus(ip, apiserverPort) - if err != nil { - glog.Errorln("Error apiserver status:", err) - } else if apiserverSt != state.Running.String() { - returnCode |= clusterNotRunningStatusFlag - } +func status(api libmachine.API, name string) (*Status, error) { + st := &Status{} + hs, err := cluster.GetHostStatus(api, name) + if err != nil { + return st, errors.Wrap(err, "host") + } + st.Host = hs + if st.Host != state.Running.String() { + return st, nil + } - ks, err := kubeconfig.IsClusterInConfig(ip, machineName) - if err != nil { - glog.Errorln("Error kubeconfig status:", err) - } - if ks { - kubeconfigSt = KubeconfigStatus.Configured - } else { - kubeconfigSt = KubeconfigStatus.Misconfigured - returnCode |= k8sNotRunningStatusFlag - } - } else { - returnCode |= minikubeNotRunningStatusFlag - } + bs, err := getClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper)) + if err != nil { + return st, errors.Wrap(err, "bootstrapper") + } - status := Status{ - Host: hostSt, - Kubelet: kubeletSt, - APIServer: apiserverSt, - Kubeconfig: kubeconfigSt, - } + st.Kubelet, err = bs.GetKubeletStatus() + if err != nil { + glog.Warningf("kubelet err: %v", err) + st.Kubelet = state.Error.String() + } - switch strings.ToLower(output) { - case "text": - printStatusText(status) - case "json": - printStatusJSON(status) - default: - exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output)) - } + ip, err := cluster.GetHostDriverIP(api, name) + if err != nil { + glog.Errorln("Error host driver ip status:", err) + st.APIServer = state.Error.String() + return st, err + } - os.Exit(returnCode) - }, + port, err := kubeconfig.Port(name) + if err != nil { + glog.Warningf("unable to get port: %v", err) + port = constants.APIServerPort + } + + st.APIServer, err = bs.GetAPIServerStatus(ip, port) + if err != nil { + glog.Errorln("Error apiserver status:", err) + st.APIServer = state.Error.String() + } + + st.Kubeconfig = Misconfigured + ks, err := kubeconfig.IsClusterInConfig(ip, name) + if err != nil { + glog.Errorln("Error kubeconfig status:", err) + } + if ks { + st.Kubeconfig = Configured + } + return st, nil } func init() { @@ -172,25 +184,26 @@ For the list accessible variables for the template, see the struct values here: `minikube status --output OUTPUT. 
json, text`) } -var printStatusText = func(status Status) { +func statusText(st *Status, w io.Writer) error { tmpl, err := template.New("status").Parse(statusFormat) if err != nil { - exit.WithError("Error creating status template", err) + return err } - err = tmpl.Execute(os.Stdout, status) - if err != nil { - exit.WithError("Error executing status template", err) + if err := tmpl.Execute(w, st); err != nil { + return err } - if status.Kubeconfig == KubeconfigStatus.Misconfigured { - out.WarningT("Warning: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`") + if st.Kubeconfig == Misconfigured { + _, err := w.Write([]byte("\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n")) + return err } + return nil } -var printStatusJSON = func(status Status) { - - jsonString, err := json.Marshal(status) +func statusJSON(st *Status, w io.Writer) error { + js, err := json.Marshal(st) if err != nil { - exit.WithError("Error converting status to json", err) + return err } - out.String(string(jsonString)) + _, err = w.Write(js) + return err } diff --git a/cmd/minikube/cmd/unpause.go b/cmd/minikube/cmd/unpause.go new file mode 100644 index 000000000000..a0d282072131 --- /dev/null +++ b/cmd/minikube/cmd/unpause.go @@ -0,0 +1,98 @@ +/* +Copyright 2020 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "os" + "strings" + + "github.com/golang/glog" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/exit" + "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/out" +) + +// unpauseCmd represents the docker-pause command +var unpauseCmd = &cobra.Command{ + Use: "unpause", + Short: "unpause Kubernetes", + Run: func(cmd *cobra.Command, args []string) { + cname := viper.GetString(config.MachineProfile) + api, err := machine.NewAPIClient() + if err != nil { + exit.WithError("Error getting client", err) + } + defer api.Close() + cc, err := config.Load(cname) + + if err != nil && !os.IsNotExist(err) { + exit.WithError("Error loading profile config", err) + } + + if err != nil { + out.ErrT(out.Meh, `"{{.name}}" profile does not exist`, out.V{"name": cname}) + os.Exit(1) + } + glog.Infof("config: %+v", cc) + host, err := cluster.CheckIfHostExistsAndLoad(api, cname) + if err != nil { + exit.WithError("Error getting host", err) + } + + r, err := machine.CommandRunner(host) + if err != nil { + exit.WithError("Failed to get command runner", err) + } + + cr, err := cruntime.New(cruntime.Config{Type: cc.ContainerRuntime, Runner: r}) + if err != nil { + exit.WithError("Failed runtime", err) + } + + glog.Infof("namespaces: %v keys: %v", namespaces, viper.AllSettings()) + if allNamespaces { + namespaces = nil //all + } else { + if len(namespaces) == 0 { + exit.WithCodeT(exit.BadUsage, "Use -A to specify all namespaces") + } + } + + ids, err := cluster.Unpause(cr, r, namespaces) + if err != nil { + exit.WithError("Pause", err) + } + + if namespaces == nil { + out.T(out.Pause, "Unpaused kubelet and {{.count}} containers", out.V{"count": len(ids)}) + } else { + out.T(out.Pause, "Unpaused kubelet and {{.count}} containers in: {{.namespaces}}", out.V{"count": len(ids), "namespaces": strings.Join(namespaces, ", ")}) + } + + }, +} + +func init() { + unpauseCmd.Flags().StringSliceVarP(&namespaces, "--namespaces", "n", cluster.DefaultNamespaces, "namespaces to unpause") + unpauseCmd.Flags().BoolVarP(&allNamespaces, "all-namespaces", "A", false, "If set, unpause all namespaces") +} diff --git a/go.sum b/go.sum index 2c4f7ca288ad..edafefc48029 100644 --- a/go.sum +++ b/go.sum @@ -730,6 +730,7 @@ k8s.io/kubernetes v1.15.2 h1:RO9EuRw5vlN3oa/lnmPxmywOoJRtg9o40KcklHXNIAQ= k8s.io/kubernetes v1.15.2/go.mod h1:3RE5ikMc73WK+dSxk4pQuQ6ZaJcPXiZX2dj98RcdCuM= k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20190623232353-8c3b7d7679cc h1:vZ5+77WP1yImZo23wc75vV5b5zCGq9gv484q8Yw5sBw= k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:pU9hbGZc8Z6+6HlNLEFY1GiNGzcCykU1Glsd4vEea2U= +k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20190623232353-8c3b7d7679cc h1:8L3YgoEmmOxIGWNv9Hj6WhJuUspT+sw4gJs2nEc0qI0= k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:Q49J/iUBV6A9nn8loyV72DK2EXhN8sqCR8FyfxIFDA4= k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20190623232353-8c3b7d7679cc h1:SHxaBZWgNouwsZCVg2+iffu0Um1ExSLPKgvO1drWShs= k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:rRBYbORqofLsn4/tsQWkeXkdKUoGrTfUwbI9s7NhU0Q= @@ -748,10 +749,12 @@ k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20190623232353-8c3b7d7679cc/ 
k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:5RWpGgZKzUcW9gCtmSVRq8maZkOetGv87HrohpTrnLI= k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:ogOX4l9UCMfFGIF+FZqmsln4NtCGPqf9zTMCIlm2YX4= k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:o6aAFW1lCnr5CJm1riWnhQskrAHhyt8btyv5UHhgZ6c= +k8s.io/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20190623232353-8c3b7d7679cc h1:j30roBbl6b5Mom66efcNOHyjdYXU2RD8UWYnL0Adb8I= k8s.io/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:y0hpsQGN8h3HcNqYbpSZEH4yC1ohi45N35c8ma9yg6M= k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:/hbCTKdfutEO2iTQv8NuYcnAxd8Tuu4mMEymYv/EZHk= k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20190623232353-8c3b7d7679cc h1:Bsljf/3UDy91qqLkevAiq6y+wl0qJrkLjWfBCQs9rss= k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:Vg6Q3IDU3hfYMICKyb43lClOXWtCtOBh2o1FfuQw8mQ= +k8s.io/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20190623232353-8c3b7d7679cc h1:ZUouIndlzPLGsRpeNAswxcs//fyODrNZOYybP6JZ9mM= k8s.io/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:9UInPlSttlDwZBFNMAsqhTtl7zH00dE2M88B9Z0Ennc= k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:xlTRb77uaXbuT6evILwFescWPMENFKYGYj3a/kOjYQE= k8s.io/kubernetes/staging/src/k8s.io/metrics v0.0.0-20190623232353-8c3b7d7679cc/go.mod h1:6Cs3k9ccbWbJo3CQOrGDu2QEVLwsWbBlu9HitjPhuSk= diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index 4d99fd8b8ed1..c11a175ac6eb 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -137,7 +137,7 @@ func (d *Driver) Kill() error { } // First try to gracefully stop containers - containers, err := d.runtime.ListContainers("") + containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) if err != nil { return errors.Wrap(err, "containers") } @@ -149,7 +149,7 @@ func (d *Driver) Kill() error { return errors.Wrap(err, "stop") } - containers, err = d.runtime.ListContainers("") + containers, err = d.runtime.ListContainers(cruntime.ListOptions{}) if err != nil { return errors.Wrap(err, "containers") } @@ -199,7 +199,7 @@ func (d *Driver) Stop() error { if err := stopKubelet(d.exec); err != nil { return err } - containers, err := d.runtime.ListContainers("") + containers, err := d.runtime.ListContainers(cruntime.ListOptions{}) if err != nil { return errors.Wrap(err, "containers") } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 1f27f156cddf..5569dc67bacd 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -81,9 +81,11 @@ func NewBootstrapper(api libmachine.API) (*Bootstrapper, error) { func (k *Bootstrapper) GetKubeletStatus() (string, error) { rr, err := k.c.RunCmd(exec.Command("sudo", "systemctl", "is-active", "kubelet")) if err != nil { - return "", errors.Wrapf(err, "getting kublet status. command: %q", rr.Command()) + // Do not return now, as we still have parsing to do! 
+ glog.Warningf("%s returned error: %v", rr.Command(), err) } s := strings.TrimSpace(rr.Stdout.String()) + glog.Infof("kubelet is-active: %s", s) switch s { case "active": return state.Running.String(), nil @@ -97,6 +99,40 @@ func (k *Bootstrapper) GetKubeletStatus() (string, error) { // GetAPIServerStatus returns the api-server status func (k *Bootstrapper) GetAPIServerStatus(ip net.IP, apiserverPort int) (string, error) { + // sudo, in case hidepid is set + rr, err := k.c.RunCmd(exec.Command("sudo", "pgrep", "kube-apiserver")) + if err != nil { + return state.Stopped.String(), nil + } + pid := strings.TrimSpace(rr.Stdout.String()) + + // Get the freezer cgroup entry for this pid + rr, err = k.c.RunCmd(exec.Command("sudo", "egrep", "^[0-9]+:freezer:", path.Join("/proc", pid, "cgroup"))) + if err != nil { + glog.Warningf("unable to find freezer cgroup: %v", err) + return kverify.APIServerStatus(ip, apiserverPort) + + } + freezer := strings.TrimSpace(rr.Stdout.String()) + glog.Infof("apiserver freezer: %q", freezer) + fparts := strings.Split(freezer, ":") + if len(fparts) != 3 { + glog.Warningf("unable to parse freezer - found %d parts: %s", len(fparts), freezer) + return kverify.APIServerStatus(ip, apiserverPort) + } + + rr, err = k.c.RunCmd(exec.Command("sudo", "cat", path.Join("/sys/fs/cgroup/freezer", fparts[2], "freezer.state"))) + if err != nil { + glog.Errorf("unable to get freezer state: %s", rr.Stderr.String()) + return kverify.APIServerStatus(ip, apiserverPort) + } + + fs := strings.TrimSpace(rr.Stdout.String()) + glog.Infof("freezer state: %q", fs) + if fs == "FREEZING" || fs == "FROZEN" { + return state.Paused.String(), nil + } + return kverify.APIServerStatus(ip, apiserverPort) } diff --git a/pkg/minikube/cluster/pause.go b/pkg/minikube/cluster/pause.go new file mode 100644 index 000000000000..2f98cf6de36d --- /dev/null +++ b/pkg/minikube/cluster/pause.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "github.com/golang/glog" + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/kubelet" +) + +// DefaultNamespaces are namespaces used by minikube, including addons +var DefaultNamespaces = []string{ + "kube-system", + "kubernetes-dashboard", + "storage-gluster", + "istio-operator", +} + +// Pause pauses a Kubernetes cluster +func Pause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]string, error) { + ids := []string{} + // Disable the kubelet so it does not attempt to restart paused pods + if err := kubelet.Disable(r); err != nil { + return ids, errors.Wrap(err, "kubelet disable") + } + if err := kubelet.Stop(r); err != nil { + return ids, errors.Wrap(err, "kubelet stop") + } + ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Running, Namespaces: namespaces}) + if err != nil { + return ids, errors.Wrap(err, "list running") + } + if len(ids) == 0 { + glog.Warningf("no running containers to pause") + return ids, nil + } + return ids, cr.PauseContainers(ids) + +} + +// Unpause unpauses a Kubernetes cluster +func Unpause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]string, error) { + ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: namespaces}) + if err != nil { + return ids, errors.Wrap(err, "list paused") + } + + if len(ids) == 0 { + glog.Warningf("no paused containers found") + } else if err := cr.UnpauseContainers(ids); err != nil { + return ids, errors.Wrap(err, "unpause") + } + + if err := kubelet.Enable(r); err != nil { + return ids, errors.Wrap(err, "kubelet enable") + } + if err := kubelet.Start(r); err != nil { + return ids, errors.Wrap(err, "kubelet start") + } + return ids, nil +} diff --git a/pkg/minikube/cruntime/containerd.go b/pkg/minikube/cruntime/containerd.go index fe10692464e3..3e94fc60209d 100644 --- a/pkg/minikube/cruntime/containerd.go +++ b/pkg/minikube/cruntime/containerd.go @@ -32,6 +32,7 @@ import ( ) const ( + containerdNamespaceRoot = "/run/containerd/runc/k8s.io" // ContainerdConfFile is the path to the containerd configuration containerdConfigFile = "/etc/containerd/config.toml" containerdConfigTemplate = `root = "/var/lib/containerd" @@ -274,8 +275,18 @@ func (r *Containerd) KubeletOptions() map[string]string { } // ListContainers returns a list of managed by this container runtime -func (r *Containerd) ListContainers(filter string) ([]string, error) { - return listCRIContainers(r.Runner, filter) +func (r *Containerd) ListContainers(o ListOptions) ([]string, error) { + return listCRIContainers(r.Runner, containerdNamespaceRoot, o) +} + +// PauseContainers pauses a running container based on ID +func (r *Containerd) PauseContainers(ids []string) error { + return pauseCRIContainers(r.Runner, containerdNamespaceRoot, ids) +} + +// PauseContainers pauses a running container based on ID +func (r *Containerd) UnpauseContainers(ids []string) error { + return unpauseCRIContainers(r.Runner, containerdNamespaceRoot, ids) } // KillContainers removes containers based on ID diff --git a/pkg/minikube/cruntime/cri.go b/pkg/minikube/cruntime/cri.go index 6905b251465c..2cf50c78e299 100644 --- a/pkg/minikube/cruntime/cri.go +++ b/pkg/minikube/cruntime/cri.go @@ -30,6 +30,120 @@ import ( "k8s.io/minikube/pkg/minikube/command" ) +// container maps to 'runc list -f json' +type container struct { + ID string + Status string +} + +// crictlList returns the output 
of 'crictl ps' in an efficient manner +func crictlList(cr CommandRunner, root string, o ListOptions) (*command.RunResult, error) { + glog.Infof("listing CRI containers in root %s: %+v", root, o) + + // Use -a because otherwise paused containers are missed + baseCmd := []string{"crictl", "ps", "-a", "--quiet"} + + if o.Name != "" { + baseCmd = append(baseCmd, fmt.Sprintf("--name=%s", o.Name)) + } + + // shortcut for all namespaces + if len(o.Namespaces) == 0 { + return cr.RunCmd(exec.Command("sudo", baseCmd...)) + } + + // Gather containers for all namespaces without causing extraneous shells to be launched + cmds := []string{} + for _, ns := range o.Namespaces { + cmd := fmt.Sprintf("%s --label io.kubernetes.pod.namespace=%s", strings.Join(baseCmd, " "), ns) + cmds = append(cmds, cmd) + } + + return cr.RunCmd(exec.Command("sudo", "-s", "eval", strings.Join(cmds, "; "))) +} + +// listCRIContainers returns a list of containers +func listCRIContainers(cr CommandRunner, root string, o ListOptions) ([]string, error) { + rr, err := crictlList(cr, root, o) + if err != nil { + return nil, errors.Wrap(err, "crictl list") + } + + // Avoid an id named "" + var ids []string + seen := map[string]bool{} + for _, id := range strings.Split(rr.Stdout.String(), "\n") { + glog.Infof("found id: %q", id) + if id != "" && !seen[id] { + ids = append(ids, id) + seen[id] = true + } + } + + if len(ids) == 0 { + return nil, nil + } + if o.State == All { + return ids, nil + } + + // crictl does not understand paused pods + cs := []container{} + args := []string{"runc"} + if root != "" { + args = append(args, "--root", root) + } + + args = append(args, "list", "-f", "json") + rr, err = cr.RunCmd(exec.Command("sudo", args...)) + if err != nil { + return nil, errors.Wrap(err, "runc") + } + content := rr.Stdout.Bytes() + glog.Infof("JSON = %s", content) + d := json.NewDecoder(bytes.NewReader(content)) + if err := d.Decode(&cs); err != nil { + return nil, err + } + + if len(cs) == 0 { + return nil, fmt.Errorf("list returned 0 containers, but ps returned %d", len(ids)) + } + + glog.Infof("list returned %d containers", len(cs)) + var fids []string + for _, c := range cs { + glog.Infof("container: %+v", c) + if !seen[c.ID] { + glog.Infof("skipping %s - not in ps", c.ID) + continue + } + if o.State != All && o.State.String() != c.Status { + glog.Infof("skipping %s: state = %q, want %q", c, c.Status, o.State) + continue + } + fids = append(fids, c.ID) + } + return fids, nil +} + +// pauseContainers pauses a list of containers +func pauseCRIContainers(cr CommandRunner, root string, ids []string) error { + args := []string{"runc"} + if root != "" { + args = append(args, "--root", root) + } + args = append(args, "pause") + + for _, id := range ids { + cargs := append(args, id) + if _, err := cr.RunCmd(exec.Command("sudo", cargs...)); err != nil { + return errors.Wrap(err, "runc") + } + } + return nil +} + // getCrictlPath returns the absolute path of crictl func getCrictlPath(cr CommandRunner) string { cmd := "crictl" @@ -40,28 +154,21 @@ func getCrictlPath(cr CommandRunner) string { return strings.Split(rr.Stdout.String(), "\n")[0] } -// listCRIContainers returns a list of containers using crictl -func listCRIContainers(cr CommandRunner, filter string) ([]string, error) { - var err error - var rr *command.RunResult - state := "Running" - crictl := getCrictlPath(cr) - if filter != "" { - c := exec.Command("sudo", crictl, "ps", "-a", fmt.Sprintf("--name=%s", filter), fmt.Sprintf("--state=%s", state), "--quiet") - rr, err = 
cr.RunCmd(c) - } else { - rr, err = cr.RunCmd(exec.Command("sudo", crictl, "ps", "-a", fmt.Sprintf("--state=%s", state), "--quiet")) +// unpauseCRIContainers pauses a list of containers +func unpauseCRIContainers(cr CommandRunner, root string, ids []string) error { + args := []string{"runc"} + if root != "" { + args = append(args, "--root", root) } - if err != nil { - return nil, err - } - var ids []string - for _, line := range strings.Split(rr.Stderr.String(), "\n") { - if line != "" { - ids = append(ids, line) + args = append(args, "resume") + + for _, id := range ids { + cargs := append(args, id) + if _, err := cr.RunCmd(exec.Command("sudo", cargs...)); err != nil { + return errors.Wrap(err, "runc") } } - return ids, nil + return nil } // criCRIContainers kills a list of containers using crictl @@ -75,7 +182,7 @@ func killCRIContainers(cr CommandRunner, ids []string) error { args := append([]string{crictl, "rm"}, ids...) c := exec.Command("sudo", args...) if _, err := cr.RunCmd(c); err != nil { - return errors.Wrap(err, "kill cri containers.") + return errors.Wrap(err, "crictl") } return nil } @@ -91,10 +198,9 @@ func stopCRIContainers(cr CommandRunner, ids []string) error { args := append([]string{crictl, "rm"}, ids...) c := exec.Command("sudo", args...) if _, err := cr.RunCmd(c); err != nil { - return errors.Wrap(err, "stop cri containers") + return errors.Wrap(err, "crictl") } return nil - } // populateCRIConfig sets up /etc/crictl.yaml diff --git a/pkg/minikube/cruntime/crio.go b/pkg/minikube/cruntime/crio.go index cd0d6c7a514b..dc4670cca765 100644 --- a/pkg/minikube/cruntime/crio.go +++ b/pkg/minikube/cruntime/crio.go @@ -27,6 +27,11 @@ import ( "k8s.io/minikube/pkg/minikube/out" ) +const ( + // CRIOConfFile is the path to the CRI-O configuration + crioConfigFile = "/etc/crio/crio.conf" +) + // CRIO contains CRIO runtime state type CRIO struct { Socket string @@ -35,11 +40,6 @@ type CRIO struct { KubernetesVersion string } -const ( - // CRIOConfFile is the path to the CRI-O configuration - crioConfigFile = "/etc/crio/crio.conf" -) - // generateCRIOConfig sets up /etc/crio/crio.conf func generateCRIOConfig(cr CommandRunner, imageRepository string) error { cPath := crioConfigFile @@ -192,8 +192,18 @@ func (r *CRIO) KubeletOptions() map[string]string { } // ListContainers returns a list of managed by this container runtime -func (r *CRIO) ListContainers(filter string) ([]string, error) { - return listCRIContainers(r.Runner, filter) +func (r *CRIO) ListContainers(o ListOptions) ([]string, error) { + return listCRIContainers(r.Runner, "", o) +} + +// PauseContainers pauses a running container based on ID +func (r *CRIO) PauseContainers(ids []string) error { + return pauseCRIContainers(r.Runner, "", ids) +} + +// PauseContainers pauses a running container based on ID +func (r *CRIO) UnpauseContainers(ids []string) error { + return unpauseCRIContainers(r.Runner, "", ids) } // KillContainers removes containers based on ID diff --git a/pkg/minikube/cruntime/cruntime.go b/pkg/minikube/cruntime/cruntime.go index 11ce642d4409..2705a012ce70 100644 --- a/pkg/minikube/cruntime/cruntime.go +++ b/pkg/minikube/cruntime/cruntime.go @@ -27,6 +27,18 @@ import ( "k8s.io/minikube/pkg/minikube/out" ) +type ContainerState int + +const ( + All ContainerState = iota + Running + Paused +) + +func (cs ContainerState) String() string { + return [...]string{"all", "running", "paused"}[cs] +} + // CommandRunner is the subset of command.Runner this package consumes type CommandRunner interface { RunCmd(cmd 
*exec.Cmd) (*command.RunResult, error) @@ -65,11 +77,15 @@ type Manager interface { ImageExists(string, string) bool // ListContainers returns a list of managed by this container runtime - ListContainers(string) ([]string, error) + ListContainers(ListOptions) ([]string, error) // KillContainers removes containers based on ID KillContainers([]string) error // StopContainers stops containers based on ID StopContainers([]string) error + // PauseContainers pauses containers based on ID + PauseContainers([]string) error + // UnpauseContainers unpauses containers based on ID + UnpauseContainers([]string) error // ContainerLogCmd returns the command to retrieve the log for a container based on ID ContainerLogCmd(string, int, bool) string // SystemLogCmd returns the command to return the system logs @@ -90,6 +106,15 @@ type Config struct { KubernetesVersion string } +type ListOptions struct { + // State is the container state to filter by (All, Running, Paused) + State ContainerState + // Name is a name filter + Name string + // Namespaces is the namespaces to look into + Namespaces []string +} + // New returns an appropriately configured runtime func New(c Config) (Manager, error) { switch c.Type { diff --git a/pkg/minikube/cruntime/cruntime_test.go b/pkg/minikube/cruntime/cruntime_test.go index cb92244ce37c..7764e1b7140a 100644 --- a/pkg/minikube/cruntime/cruntime_test.go +++ b/pkg/minikube/cruntime/cruntime_test.go @@ -118,9 +118,9 @@ func TestKubeletOptions(t *testing.T) { type serviceState int const ( - Exited serviceState = iota - Running - Restarted + SvcExited serviceState = iota + SvcRunning + SvcRestarted ) // FakeRunner is a command runner that isn't very smart. @@ -262,6 +262,7 @@ func (f *FakeRunner) containerd(args []string, _ bool) (string, error) { // crictl is a fake implementation of crictl func (f *FakeRunner) crictl(args []string, _ bool) (string, error) { + f.t.Logf("crictl args: %s", args) switch cmd := args[0]; cmd { case "info": return `{ @@ -273,9 +274,21 @@ func (f *FakeRunner) crictl(args []string, _ bool) (string, error) { "golang": "go1.11.13" }`, nil case "ps": + fmt.Printf("args %d: %v\n", len(args), args) + if len(args) != 4 { + f.t.Logf("crictl all") + ids := []string{} + for id := range f.containers { + ids = append(ids, id) + } + f.t.Logf("fake crictl: Found containers: %v", ids) + return strings.Join(ids, "\n"), nil + } + // crictl ps -a --name=apiserver --state=Running --quiet - if args[1] == "-a" && strings.HasPrefix(args[2], "--name") { - fname := strings.Split(args[2], "=")[1] + if args[1] == "-a" && strings.HasPrefix(args[3], "--name") { + fname := strings.Split(args[3], "=")[1] + f.t.Logf("crictl filter for %s", fname) ids := []string{} f.t.Logf("fake crictl: Looking for containers matching %q", fname) for id, cname := range f.containers { @@ -285,14 +298,6 @@ func (f *FakeRunner) crictl(args []string, _ bool) (string, error) { } f.t.Logf("fake crictl: Found containers: %v", ids) return strings.Join(ids, "\n"), nil - } else if args[1] == "-a" { - ids := []string{} - for id := range f.containers { - ids = append(ids, id) - } - f.t.Logf("fake crictl: Found containers: %v", ids) - return strings.Join(ids, "\n"), nil - } case "stop": for _, id := range args[1:] { @@ -341,23 +346,23 @@ func (f *FakeRunner) systemctl(args []string, root bool) (string, error) { // no if !root { return out, fmt.Errorf("not root") } - f.services[svc] = Exited + f.services[svc] = SvcExited f.t.Logf("fake systemctl: stopped %s", svc) case "start": if !root { return out, 
fmt.Errorf("not root") } - f.services[svc] = Running + f.services[svc] = SvcRunning f.t.Logf("fake systemctl: started %s", svc) case "restart": if !root { return out, fmt.Errorf("not root") } - f.services[svc] = Restarted - f.t.Logf("fake systemctl: restarted %s", svc) + f.services[svc] = SvcRestarted + f.t.Logf("fake systemctl: SvcRestarted %s", svc) case "is-active": f.t.Logf("fake systemctl: %s is-status: %v", svc, state) - if state == Running { + if state == SvcRunning { return out, nil } return out, fmt.Errorf("%s in state: %v", svc, state) @@ -403,11 +408,11 @@ func TestVersion(t *testing.T) { // defaultServices reflects the default boot state for the minikube VM var defaultServices = map[string]serviceState{ - "docker": Running, - "docker.socket": Running, - "crio": Exited, - "crio-shutdown": Exited, - "containerd": Exited, + "docker": SvcRunning, + "docker.socket": SvcRunning, + "crio": SvcExited, + "crio-shutdown": SvcExited, + "containerd": SvcExited, } func TestDisable(t *testing.T) { @@ -446,25 +451,25 @@ func TestEnable(t *testing.T) { want map[string]serviceState }{ {"docker", map[string]serviceState{ - "docker": Running, - "docker.socket": Running, - "containerd": Exited, - "crio": Exited, - "crio-shutdown": Exited, + "docker": SvcRunning, + "docker.socket": SvcRunning, + "containerd": SvcExited, + "crio": SvcExited, + "crio-shutdown": SvcExited, }}, {"containerd", map[string]serviceState{ - "docker": Exited, - "docker.socket": Exited, - "containerd": Restarted, - "crio": Exited, - "crio-shutdown": Exited, + "docker": SvcExited, + "docker.socket": SvcExited, + "containerd": SvcRestarted, + "crio": SvcExited, + "crio-shutdown": SvcExited, }}, {"crio", map[string]serviceState{ - "docker": Exited, - "docker.socket": Exited, - "containerd": Exited, - "crio": Restarted, - "crio-shutdown": Exited, + "docker": SvcExited, + "docker.socket": SvcExited, + "containerd": SvcExited, + "crio": SvcRestarted, + "crio-shutdown": SvcExited, }}, } for _, tc := range tests { @@ -516,7 +521,7 @@ func TestContainerFunctions(t *testing.T) { } // Get the list of apiservers - got, err := cr.ListContainers("apiserver") + got, err := cr.ListContainers(ListOptions{Name: "apiserver"}) if err != nil { t.Fatalf("ListContainers: %v", err) } @@ -529,7 +534,7 @@ func TestContainerFunctions(t *testing.T) { if err := cr.StopContainers(got); err != nil { t.Fatalf("stop failed: %v", err) } - got, err = cr.ListContainers("apiserver") + got, err = cr.ListContainers(ListOptions{Name: "apiserver"}) if err != nil { t.Fatalf("ListContainers: %v", err) } @@ -539,7 +544,7 @@ func TestContainerFunctions(t *testing.T) { } // Get the list of everything else. 
- got, err = cr.ListContainers("") + got, err = cr.ListContainers(ListOptions{}) if err != nil { t.Fatalf("ListContainers: %v", err) } @@ -552,7 +557,7 @@ func TestContainerFunctions(t *testing.T) { if err := cr.KillContainers(got); err != nil { t.Errorf("KillContainers: %v", err) } - got, err = cr.ListContainers("") + got, err = cr.ListContainers(ListOptions{}) if err != nil { t.Fatalf("ListContainers: %v", err) } diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index 76ecf397b0ef..24fb506c94e1 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -146,11 +146,27 @@ func (r *Docker) KubeletOptions() map[string]string { } // ListContainers returns a list of containers -func (r *Docker) ListContainers(filter string) ([]string, error) { - filter = KubernetesContainerPrefix + filter - rr, err := r.Runner.RunCmd(exec.Command("docker", "ps", "-a", fmt.Sprintf("--filter=name=%s", filter), "--format=\"{{.ID}}\"")) +func (r *Docker) ListContainers(o ListOptions) ([]string, error) { + args := []string{"ps"} + switch o.State { + case All: + args = append(args, "-a") + case Running: + args = append(args, "--filter", "status=running") + case Paused: + args = append(args, "--filter", "status=paused") + } + + nameFilter := KubernetesContainerPrefix + o.Name + if len(o.Namespaces) > 0 { + // Example result: k8s.*(kube-system|kubernetes-dashboard) + nameFilter = fmt.Sprintf("%s.*_(%s)_", nameFilter, strings.Join(o.Namespaces, "|")) + } + + args = append(args, fmt.Sprintf("--filter=name=%s", nameFilter), "--format={{.ID}}") + rr, err := r.Runner.RunCmd(exec.Command("docker", args...)) if err != nil { - return nil, errors.Wrapf(err, "docker ListContainers. ") + return nil, errors.Wrapf(err, "docker") } var ids []string for _, line := range strings.Split(rr.Stdout.String(), "\n") { @@ -184,7 +200,35 @@ func (r *Docker) StopContainers(ids []string) error { args := append([]string{"stop"}, ids...) c := exec.Command("docker", args...) if _, err := r.Runner.RunCmd(c); err != nil { - return errors.Wrap(err, "stopping containers docker.") + return errors.Wrap(err, "docker") + } + return nil +} + +// PauseContainers pauses a running container based on ID +func (r *Docker) PauseContainers(ids []string) error { + if len(ids) == 0 { + return nil + } + glog.Infof("Pausing containers: %s", ids) + args := append([]string{"pause"}, ids...) + c := exec.Command("docker", args...) + if _, err := r.Runner.RunCmd(c); err != nil { + return errors.Wrap(err, "docker") + } + return nil +} + +// UnpauseContainers unpauses a container based on ID +func (r *Docker) UnpauseContainers(ids []string) error { + if len(ids) == 0 { + return nil + } + glog.Infof("Unpausing containers: %s", ids) + args := append([]string{"unpause"}, ids...) + c := exec.Command("docker", args...) + if _, err := r.Runner.RunCmd(c); err != nil { + return errors.Wrap(err, "docker") } return nil } diff --git a/pkg/minikube/kubelet/kubelet.go b/pkg/minikube/kubelet/kubelet.go new file mode 100644 index 000000000000..315f492878cc --- /dev/null +++ b/pkg/minikube/kubelet/kubelet.go @@ -0,0 +1,105 @@ +/* +Copyright 2019 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/golang/glog" + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/command" + "k8s.io/minikube/pkg/util/retry" +) + +// Stop idempotently stops the kubelet +func Stop(cr command.Runner) error { + glog.Infof("stopping kubelet ...") + stop := func() error { + cmd := exec.Command("sudo", "systemctl", "stop", "kubelet.service") + if rr, err := cr.RunCmd(cmd); err != nil { + glog.Errorf("temporary error for %q : %v", rr.Command(), err) + } + cmd = exec.Command("sudo", "systemctl", "show", "-p", "SubState", "kubelet") + rr, err := cr.RunCmd(cmd) + if err != nil { + glog.Errorf("temporary error: for %q : %v", rr.Command(), err) + } + if !strings.Contains(rr.Stdout.String(), "dead") && !strings.Contains(rr.Stdout.String(), "failed") { + return fmt.Errorf("unexpected kubelet state: %q", rr.Stdout.String()) + } + return nil + } + + if err := retry.Expo(stop, 2*time.Second, time.Minute*3, 5); err != nil { + return errors.Wrapf(err, "error stopping kubelet") + } + + return nil +} + +// Start starts the kubelet +func Start(cr command.Runner) error { + glog.Infof("restarting kubelet.service ...") + c := exec.Command("sudo", "systemctl", "start", "kubelet") + if _, err := cr.RunCmd(c); err != nil { + return err + } + return nil +} + +// Restart restarts the kubelet +func Restart(cr command.Runner) error { + glog.Infof("restarting kubelet.service ...") + c := exec.Command("sudo", "systemctl", "restart", "kubelet.service") + if _, err := cr.RunCmd(c); err != nil { + return err + } + return nil +} + +// Check checks on the status of the kubelet +func Check(cr command.Runner) error { + glog.Infof("checking for running kubelet ...") + c := exec.Command("systemctl", "is-active", "--quiet", "service", "kubelet") + if _, err := cr.RunCmd(c); err != nil { + return errors.Wrap(err, "check kubelet") + } + return nil +} + +// Disable disables the Kubelet +func Disable(cr command.Runner) error { + glog.Infof("disabling kubelet ...") + c := exec.Command("sudo", "systemctl", "disable", "kubelet") + if _, err := cr.RunCmd(c); err != nil { + return errors.Wrap(err, "disable") + } + return nil +} + +// Enable enables the Kubelet +func Enable(cr command.Runner) error { + glog.Infof("enabling kubelet ...") + c := exec.Command("sudo", "systemctl", "enable", "kubelet") + if _, err := cr.RunCmd(c); err != nil { + return errors.Wrap(err, "enable") + } + return nil +} diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index 79328d947eed..ef26e9d7d741 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -169,7 +169,7 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Run func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, follow bool) map[string]string { cmds := bs.LogCommands(bootstrapper.LogOptions{Lines: length, Follow: follow}) for _, pod := range importantPods { - ids, err := r.ListContainers(pod) + ids, err := r.ListContainers(cruntime.ListOptions{Name: pod}) if err != nil { glog.Errorf("Failed to list containers for %q: %v", pod, err) 
continue diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 3695ed416725..682ca2fc0c08 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -81,6 +81,8 @@ var styles = map[StyleEnum]style{ Celebration: {Prefix: "πŸŽ‰ "}, Workaround: {Prefix: "πŸ‘‰ ", LowPrefix: lowIndent}, Sparkle: {Prefix: "✨ "}, + Pause: {Prefix: "⏸️ "}, + Unpause: {Prefix: "⏯️ "}, // Specialized purpose styles ISODownload: {Prefix: "πŸ’Ώ "}, diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/out/style_enum.go index 81e232b14194..0d0c6e9ae1f5 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/out/style_enum.go @@ -84,5 +84,7 @@ const ( Empty Workaround Sparkle + Pause + Unpause DryRun ) diff --git a/test/integration/helpers.go b/test/integration/helpers.go index 8d145da091bf..1453f84cdbf0 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -321,16 +321,6 @@ func showPodLogs(ctx context.Context, t *testing.T, profile string, ns string, n } } -// Status returns the minikube cluster status as a string -func Status(ctx context.Context, t *testing.T, path string, profile string) string { - t.Helper() - rr, err := Run(t, exec.CommandContext(ctx, path, "status", "--format={{.Host}}", "-p", profile)) - if err != nil { - t.Logf("status error: %v (may be ok)", err) - } - return strings.TrimSpace(rr.Stdout.String()) -} - // MaybeParallel sets that the test should run in parallel func MaybeParallel(t *testing.T) { t.Helper() diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index ad1b6832b5ff..2ea0a44aa840 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -36,6 +36,16 @@ import ( "k8s.io/minikube/pkg/minikube/constants" ) +// status returns a minikube component status as a string +func status(ctx context.Context, t *testing.T, path string, profile string, key string) string { + t.Helper() + rr, err := Run(t, exec.CommandContext(ctx, path, "status", fmt.Sprintf("--format={{.%s}}", key), "-p", profile)) + if err != nil { + t.Logf("status error: %v (may be ok)", err) + } + return strings.TrimSpace(rr.Stdout.String()) +} + func TestStartStop(t *testing.T) { MaybeParallel(t) @@ -90,44 +100,14 @@ func TestStartStop(t *testing.T) { startArgs := append([]string{"start", "-p", profile, "--alsologtostderr", "-v=3", "--wait=true"}, tc.args...) startArgs = append(startArgs, StartArgs()...) startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", tc.version)) + rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - // Fatal so that we may collect logs before stop/delete steps t.Fatalf("%s failed: %v", rr.Args, err) } - // SADNESS: 0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate. - if strings.Contains(tc.name, "cni") { - t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(") - } else { - // schedule a pod to assert persistence - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) - if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) - } - - // 8 minutes, because 4 is not enough for images to pull in all cases. 
- names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", 8*time.Minute) - if err != nil { - t.Fatalf("wait: %v", err) - } - - // Use this pod to confirm that the runtime resource limits are sane - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "/bin/sh", "-c", "ulimit -n")) - if err != nil { - t.Fatalf("ulimit: %v", err) - } - - got, err := strconv.ParseInt(strings.TrimSpace(rr.Stdout.String()), 10, 64) - if err != nil { - t.Errorf("ParseInt(%q): %v", rr.Stdout.String(), err) - } - - // Arbitrary value set by some container runtimes. If higher, apps like MySQL may make bad decisions. - expected := int64(1048576) - if got != expected { - t.Errorf("'ulimit -n' returned %d, expected %d", got, expected) - } + if !strings.Contains(tc.name, "cni") { + testPodScheduling(ctx, t, profile) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3")) @@ -135,7 +115,7 @@ func TestStartStop(t *testing.T) { t.Errorf("%s failed: %v", rr.Args, err) } - got := Status(ctx, t, Target(), profile) + got := status(ctx, t, Target(), profile, "Host") if got != state.Stopped.String() { t.Errorf("status = %q; want = %q", got, state.Stopped) } @@ -146,52 +126,23 @@ func TestStartStop(t *testing.T) { t.Fatalf("%s failed: %v", rr.Args, err) } - // Make sure that kubeadm did not need to pull in additional images - if !NoneDriver() { - rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json")) - if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) - } - jv := map[string][]struct { - Tags []string `json:"repoTags"` - }{} - err = json.Unmarshal(rr.Stdout.Bytes(), &jv) - if err != nil { - t.Errorf("images unmarshal: %v", err) - } - gotImages := []string{} - for _, img := range jv["images"] { - for _, i := range img.Tags { - if defaultImage(i) { - // Remove docker.io for naming consistency between container runtimes - gotImages = append(gotImages, strings.TrimPrefix(i, "docker.io/")) - } else { - t.Logf("Found non-minikube image: %s", i) - } - } - } - want, err := images.Kubeadm("", tc.version) - if err != nil { - t.Errorf("kubeadm images: %v", tc.version) - } - sort.Strings(want) - sort.Strings(gotImages) - if diff := cmp.Diff(want, gotImages); diff != "" { - t.Errorf("%s images mismatch (-want +got):\n%s", tc.version, diff) - } - } - if strings.Contains(tc.name, "cni") { t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(") } else if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", 4*time.Minute); err != nil { t.Fatalf("wait: %v", err) } - got = Status(ctx, t, Target(), profile) + got = status(ctx, t, Target(), profile, "Host") if got != state.Running.String() { - t.Errorf("status = %q; want = %q", got, state.Running) + t.Errorf("host status = %q; want = %q", got, state.Running) } + if !NoneDriver() { + testPulledImages(ctx, t, profile, tc.version) + } + + testPause(ctx, t, profile) + if *cleanup { // Normally handled by cleanuprofile, but not fatal there rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) @@ -204,6 +155,113 @@ func TestStartStop(t *testing.T) { }) } +// testPodScheduling asserts that this configuration can schedule new pods +func testPodScheduling(ctx context.Context, t *testing.T, profile string) { + t.Helper() + + // schedule a pod to assert persistence + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", 
filepath.Join(*testdataDir, "busybox.yaml"))) + if err != nil { + t.Fatalf("%s failed: %v", rr.Args, err) + } + + // 8 minutes, because 4 is not enough for images to pull in all cases. + names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", 8*time.Minute) + if err != nil { + t.Fatalf("wait: %v", err) + } + + // Use this pod to confirm that the runtime resource limits are sane + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "/bin/sh", "-c", "ulimit -n")) + if err != nil { + t.Fatalf("ulimit: %v", err) + } + + got, err := strconv.ParseInt(strings.TrimSpace(rr.Stdout.String()), 10, 64) + if err != nil { + t.Errorf("ParseInt(%q): %v", rr.Stdout.String(), err) + } + + // Arbitrary value set by some container runtimes. If higher, apps like MySQL may make bad decisions. + expected := int64(1048576) + if got != expected { + t.Errorf("'ulimit -n' returned %d, expected %d", got, expected) + } +} + +// testPulledImages asserts that this configuration pulls only expected images +func testPulledImages(ctx context.Context, t *testing.T, profile string, version string) { + t.Helper() + + rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json")) + if err != nil { + t.Errorf("%s failed: %v", rr.Args, err) + } + jv := map[string][]struct { + Tags []string `json:"repoTags"` + }{} + err = json.Unmarshal(rr.Stdout.Bytes(), &jv) + if err != nil { + t.Errorf("images unmarshal: %v", err) + } + gotImages := []string{} + for _, img := range jv["images"] { + for _, i := range img.Tags { + if defaultImage(i) { + // Remove docker.io for naming consistency between container runtimes + gotImages = append(gotImages, strings.TrimPrefix(i, "docker.io/")) + } else { + t.Logf("Found non-minikube image: %s", i) + } + } + } + want, err := images.Kubeadm("", version) + if err != nil { + t.Errorf("kubeadm images: %v", version) + } + sort.Strings(want) + sort.Strings(gotImages) + if diff := cmp.Diff(want, gotImages); diff != "" { + t.Errorf("%s images mismatch (-want +got):\n%s", version, diff) + } +} + +// testPause asserts that this configuration can be paused and unpaused +func testPause(ctx context.Context, t *testing.T, profile string) { + t.Helper() + + rr, err := Run(t, exec.CommandContext(ctx, Target(), "pause", "-p", profile, "--alsologtostderr", "-v=1")) + if err != nil { + t.Fatalf("%s failed: %v", rr.Args, err) + } + + got := status(ctx, t, Target(), profile, "APIServer") + if got != state.Paused.String() { + t.Errorf("apiserver status = %q; want = %q", got, state.Paused) + } + + got = status(ctx, t, Target(), profile, "Kubelet") + if got != state.Stopped.String() { + t.Errorf("kubelet status = %q; want = %q", got, state.Stopped) + } + + rr, err = Run(t, exec.CommandContext(ctx, Target(), "unpause", "-p", profile, "--alsologtostderr", "-v=1")) + if err != nil { + t.Fatalf("%s failed: %v", rr.Args, err) + } + + got = status(ctx, t, Target(), profile, "APIServer") + if got != state.Running.String() { + t.Errorf("apiserver status = %q; want = %q", got, state.Running) + } + + got = status(ctx, t, Target(), profile, "Kubelet") + if got != state.Running.String() { + t.Errorf("kubelet status = %q; want = %q", got, state.Running) + } + +} + // defaultImage returns true if this image is expected in a default minikube install func defaultImage(name string) bool { if strings.Contains(name, ":latest") {